[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/Kconfig b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/Kconfig
new file mode 100644
index 0000000..e4dc7c7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/Kconfig
@@ -0,0 +1,8 @@
+config SCSI_QLA_ISCSI
+	tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
+	depends on PCI && SCSI && NET
+	select SCSI_ISCSI_ATTRS
+	select ISCSI_BOOT_SYSFS
+	---help---
+	This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
+	and 8032 (ISP83XX) iSCSI host adapter family.
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/Makefile b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/Makefile
new file mode 100644
index 0000000..4230977
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/Makefile
@@ -0,0 +1,5 @@
+qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
+		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o
+
+obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
+
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_83xx.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_83xx.c
new file mode 100644
index 0000000..638f72c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -0,0 +1,1596 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <linux/ratelimit.h>
+
+#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
+{
+	return readl((void __iomem *)(ha->nx_pcibase + addr));
+}
+
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
+{
+	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
+}
+
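+/*
+ * Indirect CRB access goes through a per-function window register: the
+ * target address is written to the window and then read back, and a
+ * mismatch is treated as a failure to move the window.
+ */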
+static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
+{
+	uint32_t val;
+	int ret_val = QLA_SUCCESS;
+
+	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
+	val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
+	if (val != addr) {
+		ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
+			   __func__, addr, val);
+		ret_val = QLA_ERROR;
+	}
+
+	return ret_val;
+}
+
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+			      uint32_t *data)
+{
+	int ret_val;
+
+	ret_val = qla4_83xx_set_win_base(ha, addr);
+
+	if (ret_val == QLA_SUCCESS) {
+		*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
+	} else {
+		*data = 0xffffffff;
+		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
+			   __func__, addr);
+	}
+
+	return ret_val;
+}
+
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+			      uint32_t data)
+{
+	int ret_val;
+
+	ret_val = qla4_83xx_set_win_base(ha, addr);
+
+	if (ret_val == QLA_SUCCESS)
+		qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
+	else
+		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
+			   __func__, addr, data);
+
+	return ret_val;
+}
+
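+/*
+ * The driver acquires the flash semaphore by reading QLA83XX_FLASH_LOCK
+ * and treats a non-zero read as the lock being granted; while another
+ * function holds it the read returns zero. QLA83XX_FLASH_LOCK_ID records
+ * the owning function number for diagnostics.
+ */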
+static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
+{
+	int lock_owner;
+	int timeout = 0;
+	uint32_t lock_status = 0;
+	int ret_val = QLA_SUCCESS;
+
+	while (lock_status == 0) {
+		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
+		if (lock_status)
+			break;
+
+		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
+			lock_owner = qla4_83xx_rd_reg(ha,
+						      QLA83XX_FLASH_LOCK_ID);
+			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
+				   __func__, ha->func_num, lock_owner);
+			ret_val = QLA_ERROR;
+			break;
+		}
+		msleep(20);
+	}
+
+	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
+	return ret_val;
+}
+
+static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
+{
+	/* Reading FLASH_UNLOCK register unlocks the Flash */
+	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
+	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
+}
+
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+			     uint8_t *p_data, int u32_word_count)
+{
+	int i;
+	uint32_t u32_word;
+	uint32_t addr = flash_addr;
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla4_83xx_flash_lock(ha);
+	if (ret_val == QLA_ERROR)
+		goto exit_lock_error;
+
+	if (addr & 0x03) {
+		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+			   __func__, addr);
+		ret_val = QLA_ERROR;
+		goto exit_flash_read;
+	}
+
+	for (i = 0; i < u32_word_count; i++) {
+		ret_val = qla4_83xx_wr_reg_indirect(ha,
+						    QLA83XX_FLASH_DIRECT_WINDOW,
+						    (addr & 0xFFFF0000));
+		if (ret_val == QLA_ERROR) {
+			ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!",
+				   __func__, addr);
+			goto exit_flash_read;
+		}
+
+		ret_val = qla4_83xx_rd_reg_indirect(ha,
+						QLA83XX_FLASH_DIRECT_DATA(addr),
+						&u32_word);
+		if (ret_val == QLA_ERROR) {
+			ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+				   __func__, addr);
+			goto exit_flash_read;
+		}
+
+		*(__le32 *)p_data = le32_to_cpu(u32_word);
+		p_data = p_data + 4;
+		addr = addr + 4;
+	}
+
+exit_flash_read:
+	qla4_83xx_flash_unlock(ha);
+
+exit_lock_error:
+	return ret_val;
+}
+
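+/*
+ * Lockless reads go through the flash direct window: the window register
+ * selects a QLA83XX_FLASH_SECTOR_SIZE (64KB) region and the low 16 address
+ * bits are mapped via QLA83XX_FLASH_DIRECT_DATA(), so the window must be
+ * rewritten whenever a read crosses a sector boundary.
+ */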
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+				      uint32_t flash_addr, uint8_t *p_data,
+				      int u32_word_count)
+{
+	uint32_t i;
+	uint32_t u32_word;
+	uint32_t flash_offset;
+	uint32_t addr = flash_addr;
+	int ret_val = QLA_SUCCESS;
+
+	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
+
+	if (addr & 0x3) {
+		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
+			   __func__, addr);
+		ret_val = QLA_ERROR;
+		goto exit_lockless_read;
+	}
+
+	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
+					    addr);
+	if (ret_val == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+			   __func__, addr);
+		goto exit_lockless_read;
+	}
+
+	/* Check if data is spread across multiple sectors  */
+	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+
+		/* Multi sector read */
+		for (i = 0; i < u32_word_count; i++) {
+			ret_val = qla4_83xx_rd_reg_indirect(ha,
+						QLA83XX_FLASH_DIRECT_DATA(addr),
+						&u32_word);
+			if (ret_val == QLA_ERROR) {
+				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+					   __func__, addr);
+				goto exit_lockless_read;
+			}
+
+			*(__le32 *)p_data = le32_to_cpu(u32_word);
+			p_data = p_data + 4;
+			addr = addr + 4;
+			flash_offset = flash_offset + 4;
+
+			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
+				/* This write is needed once for each sector */
+				ret_val = qla4_83xx_wr_reg_indirect(ha,
+						   QLA83XX_FLASH_DIRECT_WINDOW,
+						   addr);
+				if (ret_val == QLA_ERROR) {
+					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+						   __func__, addr);
+					goto exit_lockless_read;
+				}
+				flash_offset = 0;
+			}
+		}
+	} else {
+		/* Single sector read */
+		for (i = 0; i < u32_word_count; i++) {
+			ret_val = qla4_83xx_rd_reg_indirect(ha,
+						QLA83XX_FLASH_DIRECT_DATA(addr),
+						&u32_word);
+			if (ret_val == QLA_ERROR) {
+				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
+					   __func__, addr);
+				goto exit_lockless_read;
+			}
+
+			*(__le32 *)p_data = le32_to_cpu(u32_word);
+			p_data = p_data + 4;
+			addr = addr + 4;
+		}
+	}
+
+exit_lockless_read:
+	return ret_val;
+}
+
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+	if (qla4_83xx_flash_lock(ha))
+		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
+
+	/*
+	 * Either we got the lock, or someone else was holding it; since we
+	 * are resetting anyway, forcefully unlock.
+	 */
+	qla4_83xx_flash_unlock(ha);
+}
+
+#define INTENT_TO_RECOVER	0x01
+#define PROCEED_TO_RECOVER	0x02
+
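+/*
+ * IDC lock recovery handshake, tracked in QLA83XX_DRV_LOCKRECOVERY:
+ * bits 0-1 hold the recovery state (intent/proceed) and bits 2-5 hold
+ * the function number of the driver performing the recovery.
+ */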
+static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
+{
+	uint32_t lock = 0, lockid;
+	int ret_val = QLA_ERROR;
+
+	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+
+	/* Check for other Recovery in progress, go wait */
+	if ((lockid & 0x3) != 0)
+		goto exit_lock_recovery;
+
+	/* Intent to Recover */
+	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+				   (ha->func_num << 2) | INTENT_TO_RECOVER);
+
+	msleep(200);
+
+	/* Check Intent to Recover is advertised */
+	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
+	if ((lockid & 0x3C) != (ha->func_num << 2))
+		goto exit_lock_recovery;
+
+	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
+		   __func__, ha->func_num);
+
+	/* Proceed to Recover */
+	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
+				   (ha->func_num << 2) | PROCEED_TO_RECOVER);
+
+	/* Force Unlock */
+	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
+	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
+
+	/* Clear bits 0-5 in IDC_RECOVERY register*/
+	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
+
+	/* Get lock */
+	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
+	if (lock) {
+		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
+		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
+		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
+		ret_val = QLA_SUCCESS;
+	}
+
+exit_lock_recovery:
+	return ret_val;
+}
+
+#define	QLA83XX_DRV_LOCK_MSLEEP		200
+
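+/*
+ * QLA83XX_DRV_LOCK_ID encoding: bits 0-7 hold the function number of the
+ * current lock owner and bits 8-31 hold an acquisition counter, which lets
+ * a waiter distinguish a stuck owner from a lock that is being recycled.
+ */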
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
+{
+	int timeout = 0;
+	uint32_t status = 0;
+	int ret_val = QLA_SUCCESS;
+	uint32_t first_owner = 0;
+	uint32_t tmo_owner = 0;
+	uint32_t lock_id;
+	uint32_t func_num;
+	uint32_t lock_cnt;
+
+	while (status == 0) {
+		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
+		if (status) {
+			/* Increment Counter (8-31) and update func_num (0-7) on
+			 * getting a successful lock  */
+			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
+			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
+			break;
+		}
+
+		if (timeout == 0)
+			/* Save counter + ID of function holding the lock for
+			 * first failure */
+			first_owner = ha->isp_ops->rd_reg_direct(ha,
+							  QLA83XX_DRV_LOCK_ID);
+
+		if (++timeout >=
+		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
+			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+			func_num = tmo_owner & 0xFF;
+			lock_cnt = tmo_owner >> 8;
+			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
+				   __func__, ha->func_num, func_num, lock_cnt,
+				   (first_owner & 0xFF));
+
+			if (first_owner != tmo_owner) {
+				/* Some other driver got lock, OR same driver
+				 * got lock again (counter value changed), when
+				 * we were waiting for lock.
+				 * Retry for another 2 sec */
+				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
+					   __func__, ha->func_num);
+				timeout = 0;
+			} else {
+				/* Same driver holding lock > 2sec.
+				 * Force Recovery */
+				ret_val = qla4_83xx_lock_recovery(ha);
+				if (ret_val == QLA_SUCCESS) {
+					/* Recovered and got lock */
+					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
+						   __func__, ha->func_num);
+					break;
+				}
+				/* Recovery Failed, some other function
+				 * has the lock, wait for 2secs and retry */
+				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
+					   __func__, ha->func_num);
+				timeout = 0;
+			}
+		}
+		msleep(QLA83XX_DRV_LOCK_MSLEEP);
+	}
+
+	return ret_val;
+}
+
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
+{
+	int id;
+
+	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
+
+	if ((id & 0xFF) != ha->func_num) {
+		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
+			   __func__, ha->func_num, (id & 0xFF));
+		return;
+	}
+
+	/* Keep the lock counter value; set the owner func_num bits (0-7) to 0xFF */
+	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
+	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
+}
+
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
+{
+	uint32_t idc_ctrl;
+
+	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+	idc_ctrl |= DONTRESET_BIT0;
+	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+			  idc_ctrl));
+}
+
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
+{
+	uint32_t idc_ctrl;
+
+	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+	idc_ctrl &= ~DONTRESET_BIT0;
+	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
+			  idc_ctrl));
+}
+
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
+{
+	uint32_t idc_ctrl;
+
+	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+	return idc_ctrl & DONTRESET_BIT0;
+}
+
+/*-------------------------IDC State Machine ---------------------*/
+
+enum {
+	UNKNOWN_CLASS = 0,
+	NIC_CLASS,
+	FCOE_CLASS,
+	ISCSI_CLASS
+};
+
+struct device_info {
+	int func_num;
+	int device_type;
+	int port_num;
+};
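+/*
+ * Worked example (hypothetical register value): if a nibble in
+ * DEV_PART_INFO reads 0x6, the lower two bits (0x2) give FCOE_CLASS and
+ * the upper two bits (0x4) give the physical port; a nibble of 0x7 would
+ * be ISCSI_CLASS on the same port.
+ */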
+
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+{
+	uint32_t drv_active;
+	uint32_t dev_part, dev_part1, dev_part2;
+	int i;
+	struct device_info device_map[16];
+	int func_nibble;
+	int nibble;
+	int nic_present = 0;
+	int iscsi_present = 0;
+	int iscsi_func_low = 0;
+
+	/* Use the dev_partition register to determine the PCI function number
+	 * and then check drv_active register to see which driver is loaded */
+	dev_part1 = qla4_83xx_rd_reg(ha,
+				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
+	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
+	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
+
+	/* Each function has 4 bits in the dev_partition info registers:
+	 * lower 2 bits - device type, upper 2 bits - physical port number */
+	dev_part = dev_part1;
+	for (i = nibble = 0; i <= 15; i++, nibble++) {
+		func_nibble = dev_part & (0xF << (nibble * 4));
+		func_nibble >>= (nibble * 4);
+		device_map[i].func_num = i;
+		device_map[i].device_type = func_nibble & 0x3;
+		device_map[i].port_num = func_nibble & 0xC;
+
+		if (device_map[i].device_type == NIC_CLASS) {
+			if (drv_active & (1 << device_map[i].func_num)) {
+				nic_present++;
+				break;
+			}
+		} else if (device_map[i].device_type == ISCSI_CLASS) {
+			if (drv_active & (1 << device_map[i].func_num)) {
+				if (!iscsi_present ||
+				    (iscsi_present &&
+				     (iscsi_func_low > device_map[i].func_num)))
+					iscsi_func_low = device_map[i].func_num;
+
+				iscsi_present++;
+			}
+		}
+
+		/* For function_num[8..15] get info from dev_part2 register */
+		if (nibble == 7) {
+			nibble = 0;
+			dev_part = dev_part2;
+		}
+	}
+
+	/* The reset owner is chosen by precedence among the drivers present:
+	 * NIC gets precedence over iSCSI and FCOE, and iSCSI over FCOE. */
+	if (!nic_present && (ha->func_num == iscsi_func_low)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
+				  __func__, ha->func_num));
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * qla4_83xx_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
+{
+	uint32_t dev_state, drv_state, drv_active;
+	unsigned long reset_timeout, dev_init_timeout;
+
+	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
+		   __func__);
+
+	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
+				  __func__));
+		qla4_8xxx_set_rst_ready(ha);
+
+		/* Non-reset owners ACK Reset and wait for device INIT state
+		 * as part of Reset Recovery by Reset Owner */
+		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+		do {
+			if (time_after_eq(jiffies, dev_init_timeout)) {
+				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
+					   __func__);
+				break;
+			}
+
+			ha->isp_ops->idc_unlock(ha);
+			msleep(1000);
+			ha->isp_ops->idc_lock(ha);
+
+			dev_state = qla4_8xxx_rd_direct(ha,
+							QLA8XXX_CRB_DEV_STATE);
+		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
+	} else {
+		qla4_8xxx_set_rst_ready(ha);
+		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
+			   __func__, drv_state, drv_active);
+
+		while (drv_state != drv_active) {
+			if (time_after_eq(jiffies, reset_timeout)) {
+				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+					   __func__, DRIVER_NAME, drv_state,
+					   drv_active);
+				break;
+			}
+
+			ha->isp_ops->idc_unlock(ha);
+			msleep(1000);
+			ha->isp_ops->idc_lock(ha);
+
+			drv_state = qla4_8xxx_rd_direct(ha,
+							QLA8XXX_CRB_DRV_STATE);
+			drv_active = qla4_8xxx_rd_direct(ha,
+							QLA8XXX_CRB_DRV_ACTIVE);
+		}
+
+		if (drv_state != drv_active) {
+			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
+				   __func__, (drv_active ^ drv_state));
+			drv_active = drv_active & drv_state;
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
+					    drv_active);
+		}
+
+		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
+		/* Start Reset Recovery */
+		qla4_8xxx_device_bootstrap(ha);
+	}
+}
+
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
+{
+	uint32_t idc_params, ret_val;
+
+	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
+					   (uint8_t *)&idc_params, 1);
+	if (ret_val == QLA_SUCCESS) {
+		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
+		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
+	} else {
+		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
+		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
+	}
+
+	DEBUG2(ql4_printk(KERN_DEBUG, ha,
+			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
+			  __func__, ha->nx_dev_init_timeout,
+			  ha->nx_reset_timeout));
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+
+static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
+{
+	uint8_t *phdr;
+
+	if (!ha->reset_tmplt.buff) {
+		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
+			   __func__);
+		return;
+	}
+
+	phdr = ha->reset_tmplt.buff;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
+			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+			  *(phdr+13), *(phdr+14), *(phdr+15)));
+}
+
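+/*
+ * Bootloader copy: the bootloader image is read from flash at
+ * QLA83XX_BOOTLOADER_FLASH_ADDR, padded up to a 16-byte multiple, and
+ * written, via 128-bit MS memory writes, to the destination address and
+ * size the firmware advertises in BOOTLOADER_ADDR/BOOTLOADER_SIZE.
+ */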
+static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
+{
+	uint8_t *p_cache;
+	uint32_t src, count, size;
+	uint64_t dest;
+	int ret_val = QLA_SUCCESS;
+
+	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
+	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
+	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
+
+	/* Round size up to a 16-byte (128-bit) boundary */
+	if (size & 0xF)
+		size = (size + 16) & ~0xF;
+
+	/* Number of 16-byte chunks */
+	count = size / 16;
+
+	p_cache = vmalloc(size);
+	if (p_cache == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
+			   __func__);
+		ret_val = QLA_ERROR;
+		goto exit_copy_bootloader;
+	}
+
+	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
+						    size / sizeof(uint32_t));
+	if (ret_val == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
+			   __func__);
+		goto exit_copy_error;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
+			  __func__));
+
+	/* 128 bit/16 byte write to MS memory */
+	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+					      count);
+	if (ret_val == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
+			   __func__);
+		goto exit_copy_error;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
+			  __func__, size));
+
+exit_copy_error:
+	vfree(p_cache);
+
+exit_copy_bootloader:
+	return ret_val;
+}
+
+static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
+{
+	uint32_t val, ret_val = QLA_ERROR;
+	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+
+	do {
+		val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
+		if (val == PHAN_INITIALIZE_COMPLETE) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: Command Peg initialization complete. State=0x%x\n",
+					  __func__, val));
+			ret_val = QLA_SUCCESS;
+			break;
+		}
+		msleep(CRB_CMDPEG_CHECK_DELAY);
+	} while (--retries);
+
+	return ret_val;
+}
+
+/**
+ * qla4_83xx_poll_reg - Poll the given CRB address for up to "duration"
+ * msecs until the value read, ANDed with test_mask, equals test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Total poll duration in msecs
+ * @test_mask : Mask applied to the value read
+ * @test_result : Expected value of (value & test_mask)
+ **/
+static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
+			      int duration, uint32_t test_mask,
+			      uint32_t test_result)
+{
+	uint32_t value;
+	uint8_t retries;
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+	if (ret_val == QLA_ERROR)
+		goto exit_poll_reg;
+
+	retries = duration / 10;
+	do {
+		if ((value & test_mask) != test_result) {
+			msleep(duration / 10);
+			ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
+			if (ret_val == QLA_ERROR)
+				goto exit_poll_reg;
+
+			ret_val = QLA_ERROR;
+		} else {
+			ret_val = QLA_SUCCESS;
+			break;
+		}
+	} while (retries--);
+
+exit_poll_reg:
+	if (ret_val == QLA_ERROR) {
+		ha->reset_tmplt.seq_error++;
+		ql4_printk(KERN_ERR, ha, "%s: Poll Failed:  0x%08x 0x%08x 0x%08x\n",
+			   __func__, value, test_mask, test_result);
+	}
+
+	return ret_val;
+}
+
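+/*
+ * Template integrity check: every 16-bit word of the template is summed
+ * with end-around carry; a correctly checksummed template folds to zero.
+ */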
+static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
+{
+	uint32_t sum = 0;
+	uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
+	int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
+	int ret_val;
+
+	while (u16_count-- > 0)
+		sum += *buff++;
+
+	while (sum >> 16)
+		sum = (sum & 0xFFFF) + (sum >> 16);
+
+	/* A checksum of 0 indicates a valid template */
+	if (!sum) {
+		ret_val = QLA_SUCCESS;
+	} else {
+		ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
+			   __func__);
+		ret_val = QLA_ERROR;
+	}
+
+	return ret_val;
+}
+
+/**
+ * qla4_83xx_read_reset_template - Read Reset Template from Flash
+ * @ha: Pointer to adapter structure
+ **/
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
+{
+	uint8_t *p_buff;
+	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+	uint32_t ret_val;
+
+	ha->reset_tmplt.seq_error = 0;
+	ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
+	if (ha->reset_tmplt.buff == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
+			   __func__);
+		goto exit_read_reset_template;
+	}
+
+	p_buff = ha->reset_tmplt.buff;
+	addr = QLA83XX_RESET_TEMPLATE_ADDR;
+
+	tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
+				    sizeof(uint32_t);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: Read template hdr size %d from Flash\n",
+			  __func__, tmplt_hdr_def_size));
+
+	/* Copy template header from flash */
+	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+					   tmplt_hdr_def_size);
+	if (ret_val != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
+			   __func__);
+		goto exit_read_template_error;
+	}
+
+	ha->reset_tmplt.hdr =
+		(struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
+
+	/* Validate the template header size and signature */
+	tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size / sizeof(uint32_t);
+	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+	    (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+		ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
+			   __func__, tmplt_hdr_size, tmplt_hdr_def_size);
+		goto exit_read_template_error;
+	}
+
+	addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
+	p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
+	tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
+			      ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: Read rest of the template size %d\n",
+			  __func__, ha->reset_tmplt.hdr->size));
+
+	/* Copy rest of the template */
+	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
+					   tmplt_hdr_def_size);
+	if (ret_val != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
+			   __func__);
+		goto exit_read_template_error;
+	}
+
+	/* Integrity check */
+	if (qla4_83xx_reset_seq_checksum_test(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
+			   __func__);
+		goto exit_read_template_error;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
+			  __func__));
+
+	/* Get STOP, START, INIT sequence offsets */
+	ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
+				      ha->reset_tmplt.hdr->init_seq_offset;
+	ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
+				       ha->reset_tmplt.hdr->start_seq_offset;
+	ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
+				      ha->reset_tmplt.hdr->hdr_size;
+	qla4_83xx_dump_reset_seq_hdr(ha);
+
+	goto exit_read_reset_template;
+
+exit_read_template_error:
+	vfree(ha->reset_tmplt.buff);
+
+exit_read_reset_template:
+	return;
+}
+
+/**
+ * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ **/
+static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
+					 uint32_t raddr, uint32_t waddr)
+{
+	uint32_t value;
+
+	qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+	qla4_83xx_wr_reg_indirect(ha, waddr, value);
+}
+
+/**
+ * qla4_83xx_rmw_crb_reg - Read-modify-write a CRB register
+ *
+ * Read a value from raddr (or from the saved-value array), AND it with
+ * test_mask, shift it left/right, OR/XOR it with values from the RMW
+ * header, then write the result to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : Header with mask/shift/or/xor values
+ **/
+static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
+				  uint32_t waddr,
+				  struct qla4_83xx_rmw *p_rmw_hdr)
+{
+	uint32_t value;
+
+	if (p_rmw_hdr->index_a)
+		value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
+	else
+		qla4_83xx_rd_reg_indirect(ha, raddr, &value);
+
+	value &= p_rmw_hdr->test_mask;
+	value <<= p_rmw_hdr->shl;
+	value >>= p_rmw_hdr->shr;
+	value |= p_rmw_hdr->or_value;
+	value ^= p_rmw_hdr->xor_value;
+
+	qla4_83xx_wr_reg_indirect(ha, waddr, value);
+}
+
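+/*
+ * Each of the list handlers below walks p_hdr->count fixed-size entries
+ * that immediately follow the common reset entry header, applying the
+ * optional per-entry delay from the header.
+ */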
+static void qla4_83xx_write_list(struct scsi_qla_host *ha,
+				 struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	struct qla4_83xx_entry *p_entry;
+	uint32_t i;
+
+	p_entry = (struct qla4_83xx_entry *)
+		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
+				      struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	struct qla4_83xx_entry *p_entry;
+	uint32_t i;
+
+	p_entry = (struct qla4_83xx_entry *)
+		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
+				struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	struct qla4_83xx_entry *p_entry;
+	struct qla4_83xx_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla4_83xx_poll *)
+		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+
+	/* Entries start after the 8-byte qla4_83xx_poll header, which
+	 * carries the test_mask and test_value. */
+	p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
+					     sizeof(struct qla4_83xx_poll));
+
+	delay = (long)p_hdr->delay;
+	if (!delay) {
+		for (i = 0; i < p_hdr->count; i++, p_entry++) {
+			qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+					   p_poll->test_mask,
+					   p_poll->test_value);
+		}
+	} else {
+		for (i = 0; i < p_hdr->count; i++, p_entry++) {
+			if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
+					       p_poll->test_mask,
+					       p_poll->test_value)) {
+				qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
+							  &value);
+				qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
+							  &value);
+			}
+		}
+	}
+}
+
+static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
+				      struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	struct qla4_83xx_quad_entry *p_entry;
+	struct qla4_83xx_poll *p_poll;
+	uint32_t i;
+
+	p_poll = (struct qla4_83xx_poll *)
+		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+	p_entry = (struct qla4_83xx_quad_entry *)
+		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
+					  p_entry->dr_value);
+		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+					  p_entry->ar_value);
+		if (delay) {
+			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+					       p_poll->test_mask,
+					       p_poll->test_value)) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
+						  __func__, i,
+						  ha->reset_tmplt.seq_index));
+			}
+		}
+	}
+}
+
+static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
+					struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	struct qla4_83xx_entry *p_entry;
+	struct qla4_83xx_rmw *p_rmw_hdr;
+	uint32_t i;
+
+	p_rmw_hdr = (struct qla4_83xx_rmw *)
+		    ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+	p_entry = (struct qla4_83xx_entry *)
+		  ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
+				      p_rmw_hdr);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+static void qla4_83xx_pause(struct scsi_qla_host *ha,
+			    struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	if (p_hdr->delay)
+		mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
+				     struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	int index;
+	struct qla4_83xx_quad_entry *p_entry;
+	struct qla4_83xx_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla4_83xx_poll *)
+		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
+	p_entry = (struct qla4_83xx_quad_entry *)
+		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
+					  p_entry->ar_value);
+		if (delay) {
+			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
+					       p_poll->test_mask,
+					       p_poll->test_value)) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
+						  __func__, i,
+						  ha->reset_tmplt.seq_index));
+			} else {
+				index = ha->reset_tmplt.array_index;
+				qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
+							  &value);
+				ha->reset_tmplt.array[index++] = value;
+
+				if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
+					ha->reset_tmplt.array_index = 1;
+			}
+		}
+	}
+}
+
+static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
+			      struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	ha->reset_tmplt.seq_end = 1;
+}
+
+static void qla4_83xx_template_end(struct scsi_qla_host *ha,
+				   struct qla4_83xx_reset_entry_hdr *p_hdr)
+{
+	ha->reset_tmplt.template_end = 1;
+
+	if (ha->reset_tmplt.seq_error == 0) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Reset sequence completed SUCCESSFULLY.\n",
+				  __func__));
+	} else {
+		ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
+			   __func__);
+	}
+}
+
+/**
+ * qla4_83xx_process_reset_template - Process the reset template
+ *
+ * Process entries in the reset template until an entry with the SEQ_END
+ * opcode, which marks the end of reset template processing. Each entry
+ * carries a reset entry header with the opcode/command, the size of the
+ * entry, the number of entries in the sub-sequence, and a delay in
+ * microseconds or a timeout in milliseconds.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Pointer to the first common reset entry header.
+ **/
+static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
+					     char *p_buff)
+{
+	int index, entries;
+	struct qla4_83xx_reset_entry_hdr *p_hdr;
+	char *p_entry = p_buff;
+
+	ha->reset_tmplt.seq_end = 0;
+	ha->reset_tmplt.template_end = 0;
+	entries = ha->reset_tmplt.hdr->entries;
+	index = ha->reset_tmplt.seq_index;
+
+	for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
+		p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
+		switch (p_hdr->cmd) {
+		case OPCODE_NOP:
+			break;
+		case OPCODE_WRITE_LIST:
+			qla4_83xx_write_list(ha, p_hdr);
+			break;
+		case OPCODE_READ_WRITE_LIST:
+			qla4_83xx_read_write_list(ha, p_hdr);
+			break;
+		case OPCODE_POLL_LIST:
+			qla4_83xx_poll_list(ha, p_hdr);
+			break;
+		case OPCODE_POLL_WRITE_LIST:
+			qla4_83xx_poll_write_list(ha, p_hdr);
+			break;
+		case OPCODE_READ_MODIFY_WRITE:
+			qla4_83xx_read_modify_write(ha, p_hdr);
+			break;
+		case OPCODE_SEQ_PAUSE:
+			qla4_83xx_pause(ha, p_hdr);
+			break;
+		case OPCODE_SEQ_END:
+			qla4_83xx_seq_end(ha, p_hdr);
+			break;
+		case OPCODE_TMPL_END:
+			qla4_83xx_template_end(ha, p_hdr);
+			break;
+		case OPCODE_POLL_READ_LIST:
+			qla4_83xx_poll_read_list(ha, p_hdr);
+			break;
+		default:
+			ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
+				   __func__, p_hdr->cmd, index);
+			break;
+		}
+
+		/* Set pointer to next entry in the sequence. */
+		p_entry += p_hdr->size;
+	}
+
+	ha->reset_tmplt.seq_index = index;
+}
+
+static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
+{
+	ha->reset_tmplt.seq_index = 0;
+	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
+
+	if (ha->reset_tmplt.seq_end != 1)
+		ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
+			   __func__);
+}
+
+static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
+{
+	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
+
+	if (ha->reset_tmplt.template_end != 1)
+		ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
+			   __func__);
+}
+
+static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
+{
+	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
+
+	if (ha->reset_tmplt.seq_end != 1)
+		ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
+			   __func__);
+}
+
+static int qla4_83xx_restart(struct scsi_qla_host *ha)
+{
+	int ret_val = QLA_SUCCESS;
+	uint32_t idc_ctrl;
+
+	qla4_83xx_process_stop_seq(ha);
+
+	/*
+	 * Collect minidump.
+	 * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
+	 * don't collect minidump
+	 */
+	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+				 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
+			   __func__);
+	} else {
+		qla4_8xxx_get_minidump(ha);
+	}
+
+	qla4_83xx_process_init_seq(ha);
+
+	if (qla4_83xx_copy_bootloader(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
+			   __func__);
+		ret_val = QLA_ERROR;
+		goto exit_restart;
+	}
+
+	qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
+	qla4_83xx_process_start_seq(ha);
+
+exit_restart:
+	return ret_val;
+}
+
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
+{
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla4_83xx_restart(ha);
+	if (ret_val == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
+		goto exit_start_fw;
+	} else {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
+				  __func__));
+	}
+
+	ret_val = qla4_83xx_check_cmd_peg_status(ha);
+	if (ret_val == QLA_ERROR)
+		ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
+			   __func__);
+
+exit_start_fw:
+	return ret_val;
+}
+
+/*----------------------Interrupt Related functions ---------------------*/
+
+static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
+{
+	if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
+		qla4_8xxx_intr_disable(ha);
+}
+
+static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
+{
+	uint32_t mb_int, ret;
+
+	if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+		ret = readl(&ha->qla4_83xx_reg->mbox_int);
+		mb_int = ret & ~INT_ENABLE_FW_MB;
+		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+		writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+	}
+}
+
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+	qla4_83xx_disable_mbox_intrs(ha);
+	qla4_83xx_disable_iocb_intrs(ha);
+}
+
+static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
+{
+	if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
+		qla4_8xxx_intr_enable(ha);
+		set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
+	}
+}
+
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
+{
+	uint32_t mb_int;
+
+	if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+		mb_int = INT_ENABLE_FW_MB;
+		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+		writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+		set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
+	}
+}
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+	qla4_83xx_enable_mbox_intrs(ha);
+	qla4_83xx_enable_iocb_intrs(ha);
+}
+
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+			      int incount)
+{
+	int i;
+
+	/* Load all mailbox registers, except mailbox 0. */
+	for (i = 1; i < incount; i++)
+		writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
+
+	writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
+
+	/* Set the Host Interrupt register to 1 to tell the firmware that
+	 * a mailbox command is pending. The firmware clears the host
+	 * interrupt register after reading the mailbox command. */
+	writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
+}
+
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
+{
+	int intr_status;
+
+	intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
+	if (intr_status) {
+		ha->mbox_status_count = outcount;
+		ha->isp_ops->interrupt_service_routine(ha, intr_status);
+	}
+}
+
+/**
+ * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * @ha: pointer to host adapter structure.
+ **/
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
+{
+	int rval;
+	uint32_t dev_state;
+
+	ha->isp_ops->idc_lock(ha);
+	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+
+	if (ql4xdontresethba)
+		qla4_83xx_set_idc_dontreset(ha);
+
+	if (dev_state == QLA8XXX_DEV_READY) {
+		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
+		 * recovery */
+		if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
+			ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
+				   __func__);
+			rval = QLA_ERROR;
+			goto exit_isp_reset;
+		}
+
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
+				  __func__));
+		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+				    QLA8XXX_DEV_NEED_RESET);
+
+	} else {
+		/* If device_state is NEED_RESET, go ahead with reset,
+		 * irrespective of ql4xdontresethba. This allows a
+		 * non-reset-owner to force a reset: the non-reset-owner sets
+		 * IDC_CTRL BIT0 to prevent the reset owner from doing a reset
+		 * and then forces a reset by setting device_state to
+		 * NEED_RESET. */
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: HW state already set to NEED_RESET\n",
+				  __func__));
+	}
+
+	/* For ISP8324 and ISP8042, the reset owner is NIC, iSCSI or FCOE,
+	 * based on priority and which drivers are present. Unlike ISP8022,
+	 * the function setting NEED_RESET may not be the reset owner. */
+	if (qla4_83xx_can_perform_reset(ha))
+		set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+
+	ha->isp_ops->idc_unlock(ha);
+	rval = qla4_8xxx_device_state_handler(ha);
+
+	ha->isp_ops->idc_lock(ha);
+	qla4_8xxx_clear_rst_ready(ha);
+exit_isp_reset:
+	ha->isp_ops->idc_unlock(ha);
+
+	if (rval == QLA_SUCCESS)
+		clear_bit(AF_FW_RECOVERY, &ha->flags);
+
+	return rval;
+}
+
+static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
+{
+	u32 val = 0, val1 = 0;
+	int i, status = QLA_SUCCESS;
+
+	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
+
+	/* Port 0 Rx Buffer Pause Threshold Registers. */
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+		"Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+	for (i = 0; i < 8; i++) {
+		status = qla4_83xx_rd_reg_indirect(ha,
+				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
+		DEBUG2(pr_info("0x%x ", val));
+	}
+
+	DEBUG2(pr_info("\n"));
+
+	/* Port 1 Rx Buffer Pause Threshold Registers. */
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+		"Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
+	for (i = 0; i < 8; i++) {
+		status = qla4_83xx_rd_reg_indirect(ha,
+				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
+		DEBUG2(pr_info("0x%x  ", val));
+	}
+
+	DEBUG2(pr_info("\n"));
+
+	/* Port 0 RxB Traffic Class Max Cell Registers. */
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+		"Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
+	for (i = 0; i < 4; i++) {
+		status = qla4_83xx_rd_reg_indirect(ha,
+			       QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
+		DEBUG2(pr_info("0x%x  ", val));
+	}
+
+	DEBUG2(pr_info("\n"));
+
+	/* Port 1 RxB Traffic Class Max Cell Registers. */
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+		"Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
+	for (i = 0; i < 4; i++) {
+		status = qla4_83xx_rd_reg_indirect(ha,
+			       QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
+		DEBUG2(pr_info("0x%x  ", val));
+	}
+
+	DEBUG2(pr_info("\n"));
+
+	/* Port 0 RxB Rx Traffic Class Stats. */
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
+	for (i = 7; i >= 0; i--) {
+		status = qla4_83xx_rd_reg_indirect(ha,
+						   QLA83XX_PORT0_RXB_TC_STATS,
+						   &val);
+		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
+		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
+					  (val | (i << 29)));
+		status = qla4_83xx_rd_reg_indirect(ha,
+						   QLA83XX_PORT0_RXB_TC_STATS,
+						   &val);
+		DEBUG2(pr_info("0x%x  ", val));
+	}
+
+	DEBUG2(pr_info("\n"));
+
+	/* Port 1 RxB Rx Traffic Class Stats. */
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
+	for (i = 7; i >= 0; i--) {
+		status = qla4_83xx_rd_reg_indirect(ha,
+						   QLA83XX_PORT1_RXB_TC_STATS,
+						   &val);
+		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
+		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
+					  (val | (i << 29)));
+		status = qla4_83xx_rd_reg_indirect(ha,
+						   QLA83XX_PORT1_RXB_TC_STATS,
+						   &val);
+		DEBUG2(pr_info("0x%x  ", val));
+	}
+
+	DEBUG2(pr_info("\n"));
+
+	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+					   &val);
+	status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+					   &val1);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+			  val, val1));
+}
+
+static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+	int i;
+
+	/* set SRE-Shim Control Register */
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
+				  QLA83XX_SET_PAUSE_VAL);
+
+	for (i = 0; i < 8; i++) {
+		/* Port 0 Rx Buffer Pause Threshold Registers. */
+		qla4_83xx_wr_reg_indirect(ha,
+				      QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
+				      QLA83XX_SET_PAUSE_VAL);
+		/* Port 1 Rx Buffer Pause Threshold Registers. */
+		qla4_83xx_wr_reg_indirect(ha,
+				      QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
+				      QLA83XX_SET_PAUSE_VAL);
+	}
+
+	for (i = 0; i < 4; i++) {
+		/* Port 0 RxB Traffic Class Max Cell Registers. */
+		qla4_83xx_wr_reg_indirect(ha,
+				     QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
+				     QLA83XX_SET_TC_MAX_CELL_VAL);
+		/* Port 1 RxB Traffic Class Max Cell Registers. */
+		qla4_83xx_wr_reg_indirect(ha,
+				     QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
+				     QLA83XX_SET_TC_MAX_CELL_VAL);
+	}
+
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
+				  QLA83XX_SET_PAUSE_VAL);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
+				  QLA83XX_SET_PAUSE_VAL);
+
+	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
+}
+
+/**
+ * qla4_83xx_eport_init - Initialize EPort.
+ * @ha: Pointer to host adapter structure.
+ *
+ * If the EPort hardware is in the reset state when pause frames are
+ * disabled, the hardware can wedge badly. To prevent this, perform EPort
+ * init every time before disabling pause frames.
+ **/
+static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
+{
+	/* Clear the 8 registers */
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
+
+	/* Write any value to Reset Control register */
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
+
+	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
+}
+
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
+{
+	ha->isp_ops->idc_lock(ha);
+	/* Before disabling pause frames, ensure that eport is not in reset */
+	qla4_83xx_eport_init(ha);
+	qla4_83xx_dump_pause_control_regs(ha);
+	__qla4_83xx_disable_pause(ha);
+	ha->isp_ops->idc_unlock(ha);
+}
+
+/**
+ * qla4_83xx_is_detached - Check if we are marked invisible.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4_83xx_is_detached(struct scsi_qla_host *ha)
+{
+	uint32_t drv_active;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+	if (test_bit(AF_INIT_DONE, &ha->flags) &&
+	    !(drv_active & (1 << ha->func_num))) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
+				  __func__, drv_active));
+		return QLA_SUCCESS;
+	}
+
+	return QLA_ERROR;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_83xx.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_83xx.h
new file mode 100644
index 0000000..775fdf9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -0,0 +1,371 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL483XX_H
+#define __QL483XX_H
+
+/* Indirectly Mapped Registers */
+#define QLA83XX_FLASH_SPI_STATUS	0x2808E010
+#define QLA83XX_FLASH_SPI_CONTROL	0x2808E014
+#define QLA83XX_FLASH_STATUS		0x42100004
+#define QLA83XX_FLASH_CONTROL		0x42110004
+#define QLA83XX_FLASH_ADDR		0x42110008
+#define QLA83XX_FLASH_WRDATA		0x4211000C
+#define QLA83XX_FLASH_RDDATA		0x42110018
+#define QLA83XX_FLASH_DIRECT_WINDOW	0x42110030
+#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF & (DATA)))
+
+/* Directly Mapped Registers in 83xx register table */
+
+/* Flash access regs */
+#define QLA83XX_FLASH_LOCK		0x3850
+#define QLA83XX_FLASH_UNLOCK		0x3854
+#define QLA83XX_FLASH_LOCK_ID		0x3500
+
+/* Driver Lock regs */
+#define QLA83XX_DRV_LOCK		0x3868
+#define QLA83XX_DRV_UNLOCK		0x386C
+#define QLA83XX_DRV_LOCK_ID		0x3504
+#define QLA83XX_DRV_LOCKRECOVERY	0x379C
+
+/* IDC version */
+#define QLA83XX_IDC_VER_MAJ_VALUE       0x1
+#define QLA83XX_IDC_VER_MIN_VALUE       0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA83XX_CRB_IDC_VER_MAJOR	0x3780
+#define QLA83XX_CRB_IDC_VER_MINOR	0x3798
+#define QLA83XX_IDC_DRV_CTRL		0x3790
+#define QLA83XX_IDC_DRV_AUDIT		0x3794
+#define QLA83XX_SRE_SHIM_CONTROL	0x0D200284
+#define QLA83XX_PORT0_RXB_PAUSE_THRS	0x0B2003A4
+#define QLA83XX_PORT1_RXB_PAUSE_THRS	0x0B2013A4
+#define QLA83XX_PORT0_RXB_TC_MAX_CELL	0x0B200388
+#define QLA83XX_PORT1_RXB_TC_MAX_CELL	0x0B201388
+#define QLA83XX_PORT0_RXB_TC_STATS	0x0B20039C
+#define QLA83XX_PORT1_RXB_TC_STATS	0x0B20139C
+#define QLA83XX_PORT2_IFB_PAUSE_THRS	0x0B200704
+#define QLA83XX_PORT3_IFB_PAUSE_THRS	0x0B201704
+
+/* set value to pause threshold value */
+#define QLA83XX_SET_PAUSE_VAL		0x0
+#define QLA83XX_SET_TC_MAX_CELL_VAL	0x03FF03FF
+
+#define QLA83XX_RESET_CONTROL		0x28084E50
+#define QLA83XX_RESET_REG		0x28084E60
+#define QLA83XX_RESET_PORT0		0x28084E70
+#define QLA83XX_RESET_PORT1		0x28084E80
+#define QLA83XX_RESET_PORT2		0x28084E90
+#define QLA83XX_RESET_PORT3		0x28084EA0
+#define QLA83XX_RESET_SRE_SHIM		0x28084EB0
+#define QLA83XX_RESET_EPG_SHIM		0x28084EC0
+#define QLA83XX_RESET_ETHER_PCS		0x28084ED0
+
+/* qla_83xx_reg_tbl registers */
+#define QLA83XX_PEG_HALT_STATUS1	0x34A8
+#define QLA83XX_PEG_HALT_STATUS2	0x34AC
+#define QLA83XX_PEG_ALIVE_COUNTER	0x34B0 /* FW_HEARTBEAT */
+#define QLA83XX_FW_CAPABILITIES		0x3528
+#define QLA83XX_CRB_DRV_ACTIVE		0x3788 /* IDC_DRV_PRESENCE */
+#define QLA83XX_CRB_DEV_STATE		0x3784 /* IDC_DEV_STATE */
+#define QLA83XX_CRB_DRV_STATE		0x378C /* IDC_DRV_ACK */
+#define QLA83XX_CRB_DRV_SCRATCH		0x3548
+#define QLA83XX_CRB_DEV_PART_INFO1	0x37E0
+#define QLA83XX_CRB_DEV_PART_INFO2	0x37E4
+
+#define QLA83XX_FW_VER_MAJOR		0x3550
+#define QLA83XX_FW_VER_MINOR		0x3554
+#define QLA83XX_FW_VER_SUB		0x3558
+#define QLA83XX_NPAR_STATE		0x359C
+#define QLA83XX_FW_IMAGE_VALID		0x35FC
+#define QLA83XX_CMDPEG_STATE		0x3650
+#define QLA83XX_ASIC_TEMP		0x37B4
+#define QLA83XX_FW_API			0x356C
+#define QLA83XX_DRV_OP_MODE		0x3570
+
+static const uint32_t qla4_83xx_reg_tbl[] = {
+	QLA83XX_PEG_HALT_STATUS1,
+	QLA83XX_PEG_HALT_STATUS2,
+	QLA83XX_PEG_ALIVE_COUNTER,
+	QLA83XX_CRB_DRV_ACTIVE,
+	QLA83XX_CRB_DEV_STATE,
+	QLA83XX_CRB_DRV_STATE,
+	QLA83XX_CRB_DRV_SCRATCH,
+	QLA83XX_CRB_DEV_PART_INFO1,
+	QLA83XX_CRB_IDC_VER_MAJOR,
+	QLA83XX_FW_VER_MAJOR,
+	QLA83XX_FW_VER_MINOR,
+	QLA83XX_FW_VER_SUB,
+	QLA83XX_CMDPEG_STATE,
+	QLA83XX_ASIC_TEMP,
+};
+
+#define QLA83XX_CRB_WIN_BASE		0x3800
+#define QLA83XX_CRB_WIN_FUNC(f)		(QLA83XX_CRB_WIN_BASE+((f)*4))
+#define QLA83XX_SEM_LOCK_BASE		0x3840
+#define QLA83XX_SEM_UNLOCK_BASE		0x3844
+#define QLA83XX_SEM_LOCK_FUNC(f)	(QLA83XX_SEM_LOCK_BASE+((f)*8))
+#define QLA83XX_SEM_UNLOCK_FUNC(f)	(QLA83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLA83XX_LINK_STATE(f)		(0x3698+((f) > 7 ? 4 : 0))
+#define QLA83XX_LINK_SPEED(f)		(0x36E0+(((f) >> 2) * 4))
+#define QLA83XX_MAX_LINK_SPEED(f)       (0x36F0+(((f) / 4) * 4))
+#define QLA83XX_LINK_SPEED_FACTOR	10
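+
+/* For example, QLA83XX_CRB_WIN_FUNC(2) evaluates to 0x3808, and
+ * QLA83XX_SEM_LOCK_FUNC(2) to 0x3850, which is the same address as
+ * QLA83XX_FLASH_LOCK above. */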
+
+/* FLASH API Defines */
+#define QLA83xx_FLASH_MAX_WAIT_USEC	100
+#define QLA83XX_FLASH_LOCK_TIMEOUT	10000
+#define QLA83XX_FLASH_SECTOR_SIZE	65536
+#define QLA83XX_DRV_LOCK_TIMEOUT	2000
+#define QLA83XX_FLASH_SECTOR_ERASE_CMD	0xdeadbeef
+#define QLA83XX_FLASH_WRITE_CMD		0xdacdacda
+#define QLA83XX_FLASH_BUFFER_WRITE_CMD	0xcadcadca
+#define QLA83XX_FLASH_READ_RETRY_COUNT	2000
+#define QLA83XX_FLASH_STATUS_READY	0x6
+#define QLA83XX_FLASH_BUFFER_WRITE_MIN	2
+#define QLA83XX_FLASH_BUFFER_WRITE_MAX	64
+#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA83XX_ERASE_MODE		1
+#define QLA83XX_WRITE_MODE		2
+#define QLA83XX_DWORD_WRITE_MODE	3
+
+#define QLA83XX_GLOBAL_RESET		0x38CC
+#define QLA83XX_WILDCARD		0x38F0
+#define QLA83XX_INFORMANT		0x38FC
+#define QLA83XX_HOST_MBX_CTRL		0x3038
+#define QLA83XX_FW_MBX_CTRL		0x303C
+#define QLA83XX_BOOTLOADER_ADDR		0x355C
+#define QLA83XX_BOOTLOADER_SIZE		0x3560
+#define QLA83XX_FW_IMAGE_ADDR		0x3564
+#define QLA83XX_MBX_INTR_ENABLE		0x1000
+#define QLA83XX_MBX_INTR_MASK		0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0		0x1
+#define GRACEFUL_RESET_BIT1	0x2
+
+#define QLA83XX_HALT_STATUS_INFORMATIONAL	(0x1 << 29)
+#define QLA83XX_HALT_STATUS_FW_RESET		(0x2 << 29)
+#define QLA83XX_HALT_STATUS_UNRECOVERABLE	(0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA83XX_BOOTLOADER_FLASH_ADDR	0x10000
+#define QLA83XX_BOOT_FROM_FLASH		0
+
+#define QLA83XX_IDC_PARAM_ADDR		0x3e8020
+/* Reset template definitions */
+#define QLA83XX_MAX_RESET_SEQ_ENTRIES	16
+#define QLA83XX_RESTART_TEMPLATE_SIZE	0x2000
+#define QLA83XX_RESET_TEMPLATE_ADDR	0x4F0000
+#define QLA83XX_RESET_SEQ_VERSION	0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP			0x0000
+#define OPCODE_WRITE_LIST		0x0001
+#define OPCODE_READ_WRITE_LIST		0x0002
+#define OPCODE_POLL_LIST		0x0004
+#define OPCODE_POLL_WRITE_LIST		0x0008
+#define OPCODE_READ_MODIFY_WRITE	0x0010
+#define OPCODE_SEQ_PAUSE		0x0020
+#define OPCODE_SEQ_END			0x0040
+#define OPCODE_TMPL_END			0x0080
+#define OPCODE_POLL_READ_LIST		0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE	0xCAFE
+struct qla4_83xx_reset_template_hdr {
+	__le16	version;
+	__le16	signature;
+	__le16	size;
+	__le16	entries;
+	__le16	hdr_size;
+	__le16	checksum;
+	__le16	init_seq_offset;
+	__le16	start_seq_offset;
+} __packed;
+
+/* Common Entry Header. */
+struct qla4_83xx_reset_entry_hdr {
+	__le16 cmd;
+	__le16 size;
+	__le16 count;
+	__le16 delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla4_83xx_poll {
+	__le32  test_mask;
+	__le32  test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla4_83xx_rmw {
+	__le32  test_mask;
+	__le32  xor_value;
+	__le32  or_value;
+	uint8_t shl;
+	uint8_t shr;
+	uint8_t index_a;
+	uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla4_83xx_entry {
+	__le32 arg1;
+	__le32 arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla4_83xx_quad_entry {
+	__le32 dr_addr;
+	__le32 dr_value;
+	__le32 ar_addr;
+	__le32 ar_value;
+} __packed;
+
+struct qla4_83xx_reset_template {
+	int seq_index;
+	int seq_error;
+	int array_index;
+	uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
+	uint8_t *buff;
+	uint8_t *stop_offset;
+	uint8_t *start_offset;
+	uint8_t *init_offset;
+	struct qla4_83xx_reset_template_hdr *hdr;
+	uint8_t seq_end;
+	uint8_t template_end;
+};
+
+/* POLLRD Entry */
+struct qla83xx_minidump_entry_pollrd {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t read_addr;
+	uint32_t select_value;
+	uint16_t select_value_stride;
+	uint16_t op_count;
+	uint32_t poll_wait;
+	uint32_t poll_mask;
+	uint32_t data_size;
+	uint32_t rsvd_1;
+};
+
+struct qla8044_minidump_entry_rddfe {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t value;
+	uint8_t stride;
+	uint8_t stride2;
+	uint16_t count;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t modify_mask;
+	uint32_t data_size;
+	uint32_t rsvd;
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+	struct qla8xxx_minidump_entry_hdr h;
+
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint8_t stride_1;
+	uint8_t stride_2;
+	uint16_t count;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t value_2;
+	uint32_t data_size;
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t data_size;
+	uint32_t rsvd;
+} __packed;
+
+/* RDMUX2 Entry */
+struct qla83xx_minidump_entry_rdmux2 {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t select_addr_1;
+	uint32_t select_addr_2;
+	uint32_t select_value_1;
+	uint32_t select_value_2;
+	uint32_t op_count;
+	uint32_t select_value_mask;
+	uint32_t read_addr;
+	uint8_t select_value_stride;
+	uint8_t data_size;
+	uint8_t rsvd[2];
+};
+
+/* POLLRDMWR Entry */
+struct qla83xx_minidump_entry_pollrdmwr {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t poll_wait;
+	uint32_t poll_mask;
+	uint32_t modify_mask;
+	uint32_t data_size;
+};
+
+/* IDC additional information */
+struct qla4_83xx_idc_information {
+	uint32_t request_desc;  /* IDC request descriptor */
+	uint32_t info1; /* IDC additional info */
+	uint32_t info2; /* IDC additional info */
+	uint32_t info3; /* IDC additional info */
+};
+
+#define QLA83XX_PEX_DMA_ENGINE_INDEX		8
+#define QLA83XX_PEX_DMA_BASE_ADDRESS		0x77320000
+#define QLA83XX_PEX_DMA_NUM_OFFSET		0x10000
+#define QLA83XX_PEX_DMA_CMD_ADDR_LOW		0x0
+#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH		0x04
+#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL	0x08
+
+#define QLA83XX_PEX_DMA_READ_SIZE	(16 * 1024)
+#define QLA83XX_PEX_DMA_MAX_WAIT	(100 * 100) /* Max wait of 100 msecs */
+
+/* Read Memory: For Pex-DMA */
+struct qla4_83xx_minidump_entry_rdmem_pex_dma {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t desc_card_addr;
+	uint16_t dma_desc_cmd;
+	uint8_t rsvd[2];
+	uint32_t start_dma_cmd;
+	uint8_t rsvd2[12];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+struct qla4_83xx_pex_dma_descriptor {
+	struct {
+		uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+		uint8_t rsvd[2];
+		uint16_t dma_desc_cmd;
+	} cmd;
+	uint64_t src_addr;
+	uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func,
+				* 8-15: desc-cmd */
+	uint8_t rsvd[24];
+} __packed;
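+
+/*
+ * For the Pex-DMA minidump read the driver is expected to build one
+ * such descriptor per QLA83XX_PEX_DMA_READ_SIZE chunk, point src_addr
+ * at adapter memory and dma_bus_addr at a host DMA buffer, program the
+ * descriptor through the PEX DMA engine registers above, and poll for
+ * completion within QLA83XX_PEX_DMA_MAX_WAIT.
+ */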
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_attr.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_attr.c
new file mode 100644
index 0000000..463239c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_attr.c
@@ -0,0 +1,351 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+			     struct bin_attribute *ba, char *buf, loff_t off,
+			     size_t count)
+{
+	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+					       struct device, kobj)));
+
+	if (is_qla40XX(ha))
+		return -EINVAL;
+
+	if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+		return 0;
+
+	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+				       ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+			      struct bin_attribute *ba, char *buf, loff_t off,
+			      size_t count)
+{
+	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+					       struct device, kobj)));
+	uint32_t dev_state;
+	long reading;
+	int ret = 0;
+
+	if (is_qla40XX(ha))
+		return -EINVAL;
+
+	if (off != 0)
+		return ret;
+
+	/* only the first character of the input is significant */
+	buf[1] = 0;
+	ret = kstrtol(buf, 10, &reading);
+	if (ret) {
+		ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+			   __func__, ret);
+		return ret;
+	}
+
+	switch (reading) {
+	case 0:
+		/* clear dump collection flags */
+		if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+			clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+			/* Reload minidump template */
+			qla4xxx_alloc_fw_dump(ha);
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Firmware template reloaded\n"));
+		}
+		break;
+	case 1:
+		/* Set flag to read dump */
+		if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+		    !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+			set_bit(AF_82XX_DUMP_READING, &ha->flags);
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Raw firmware dump ready for read on (%ld).\n",
+					  ha->host_no));
+		}
+		break;
+	case 2:
+		/* Reset HBA and collect FW dump */
+		ha->isp_ops->idc_lock(ha);
+		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+		if (dev_state == QLA8XXX_DEV_READY) {
+			ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
+				   __func__);
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_NEED_RESET);
+			if (is_qla8022(ha) ||
+			    ((is_qla8032(ha) || is_qla8042(ha)) &&
+			     qla4_83xx_can_perform_reset(ha))) {
+				set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+				set_bit(AF_FW_RECOVERY, &ha->flags);
+				ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
+					   __func__, ha->func_num);
+			}
+		} else
+			ql4_printk(KERN_INFO, ha,
+				   "%s: Reset not performed as device state is 0x%x\n",
+				   __func__, dev_state);
+
+		ha->isp_ops->idc_unlock(ha);
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	return count;
+}
+
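+/*
+ * Userspace drives a firmware dump through the "fw_dump" attribute
+ * defined below (the sysfs path is illustrative; N is the SCSI host
+ * number):
+ *
+ *	echo 2 > /sys/class/scsi_host/hostN/device/fw_dump  # reset + dump
+ *	echo 1 > /sys/class/scsi_host/hostN/device/fw_dump  # mark readable
+ *	cat /sys/class/scsi_host/hostN/device/fw_dump > fw.dump
+ *	echo 0 > /sys/class/scsi_host/hostN/device/fw_dump  # clear + reload
+ */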
+static struct bin_attribute sysfs_fw_dump_attr = {
+	.attr = {
+		.name = "fw_dump",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla4_8xxx_sysfs_read_fw_dump,
+	.write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+	char *name;
+	struct bin_attribute *attr;
+} bin_file_entries[] = {
+	{ "fw_dump", &sysfs_fw_dump_attr },
+	{ NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+	struct Scsi_Host *host = ha->host;
+	struct sysfs_entry *iter;
+	int ret;
+
+	for (iter = bin_file_entries; iter->name; iter++) {
+		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+					    iter->attr);
+		if (ret)
+			ql4_printk(KERN_ERR, ha,
+				   "Unable to create sysfs %s binary attribute (%d).\n",
+				   iter->name, ret);
+	}
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+	struct Scsi_Host *host = ha->host;
+	struct sysfs_entry *iter;
+
+	for (iter = bin_file_entries; iter->name; iter++)
+		sysfs_remove_bin_file(&host->shost_gendev.kobj,
+				      iter->attr);
+}
+
+/* Scsi_Host attributes. */
+static ssize_t
+qla4xxx_fw_version_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+	if (is_qla80XX(ha))
+		return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+				ha->fw_info.fw_major, ha->fw_info.fw_minor,
+				ha->fw_info.fw_patch, ha->fw_info.fw_build);
+	else
+		return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+				ha->fw_info.fw_major, ha->fw_info.fw_minor,
+				ha->fw_info.fw_patch, ha->fw_info.fw_build);
+}
+
+static ssize_t
+qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
+}
+
+static ssize_t
+qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
+			ha->fw_info.iscsi_minor);
+}
+
+static ssize_t
+qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
+			ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
+			ha->fw_info.bootload_patch, ha->fw_info.bootload_build);
+}
+
+static ssize_t
+qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
+}
+
+static ssize_t
+qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+	qla4xxx_get_firmware_state(ha);
+	return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
+			ha->addl_fw_state);
+}
+
+static ssize_t
+qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+	if (is_qla40XX(ha))
+		return -ENOSYS;
+
+	return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
+}
+
+static ssize_t
+qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+	if (is_qla40XX(ha))
+		return -ENOSYS;
+
+	return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
+}
+
+static ssize_t
+qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+	if (is_qla40XX(ha))
+		return -ENOSYS;
+
+	return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
+}
+
+static ssize_t
+qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
+}
+
+static ssize_t
+qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
+			ha->fw_info.fw_build_time);
+}
+
+static ssize_t
+qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
+}
+
+static ssize_t
+qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
+}
+
+static ssize_t
+qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	char *load_src = NULL;
+
+	switch (ha->fw_info.fw_load_source) {
+	case 1:
+		load_src = "Flash Primary";
+		break;
+	case 2:
+		load_src = "Flash Secondary";
+		break;
+	case 3:
+		load_src = "Host Download";
+		break;
+	default:
+		load_src = "Unknown";
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
+}
+
+static ssize_t
+qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+	qla4xxx_about_firmware(ha);
+	return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
+			ha->fw_uptime_msecs);
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
+static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
+static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL);
+static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
+static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
+static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
+static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
+static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL);
+static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL);
+static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
+		   NULL);
+static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
+static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
+
+struct device_attribute *qla4xxx_host_attrs[] = {
+	&dev_attr_fw_version,
+	&dev_attr_serial_num,
+	&dev_attr_iscsi_version,
+	&dev_attr_optrom_version,
+	&dev_attr_board_id,
+	&dev_attr_fw_state,
+	&dev_attr_phy_port_cnt,
+	&dev_attr_phy_port_num,
+	&dev_attr_iscsi_func_cnt,
+	&dev_attr_hba_model,
+	&dev_attr_fw_timestamp,
+	&dev_attr_fw_build_user,
+	&dev_attr_fw_ext_timestamp,
+	&dev_attr_fw_load_src,
+	&dev_attr_fw_uptime,
+	NULL,
+};
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_bsg.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_bsg.c
new file mode 100644
index 0000000..415ee5e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -0,0 +1,873 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_bsg.h"
+
+static int
+qla4xxx_read_flash(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	uint32_t offset = 0;
+	uint32_t length = 0;
+	dma_addr_t flash_dma;
+	uint8_t *flash = NULL;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	if (ha->flash_state != QLFLASH_WAITING) {
+		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+			   "active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	ha->flash_state = QLFLASH_READING;
+	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	length = bsg_job->reply_payload.payload_len;
+
+	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+				   GFP_KERNEL);
+	if (!flash) {
+		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+			   "data\n", __func__);
+		rval = -ENOMEM;
+		goto leave;
+	}
+
+	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else {
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+					    bsg_job->reply_payload.sg_cnt,
+					    flash, length);
+		bsg_reply->result = DID_OK << 16;
+	}
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+	ha->flash_state = QLFLASH_WAITING;
+	return rval;
+}
+
+static int
+qla4xxx_update_flash(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	uint32_t length = 0;
+	uint32_t offset = 0;
+	uint32_t options = 0;
+	dma_addr_t flash_dma;
+	uint8_t *flash = NULL;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	if (ha->flash_state != QLFLASH_WAITING) {
+		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+			   "active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	ha->flash_state = QLFLASH_WRITING;
+	length = bsg_job->request_payload.payload_len;
+	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+				   GFP_KERNEL);
+	if (!flash) {
+		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+			   "data\n", __func__);
+		rval = -ENOMEM;
+		goto leave;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+			  bsg_job->request_payload.sg_cnt, flash, length);
+
+	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else
+		bsg_reply->result = DID_OK << 16;
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+	ha->flash_state = QLFLASH_WAITING;
+	return rval;
+}
+
+static int
+qla4xxx_get_acb_state(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint32_t status[MBOX_REG_COUNT];
+	uint32_t acb_idx;
+	uint32_t ip_idx;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	/* Only 4022 and above adapters are supported */
+	if (is_qla4010(ha))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
+		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
+			   __func__, bsg_job->reply_payload.payload_len);
+		rval = -EINVAL;
+		goto leave;
+	}
+
+	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
+			   __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else {
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+					    bsg_job->reply_payload.sg_cnt,
+					    status, sizeof(status));
+		bsg_reply->result = DID_OK << 16;
+	}
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+leave:
+	return rval;
+}
+
+static int
+qla4xxx_read_nvram(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint32_t offset = 0;
+	uint32_t len = 0;
+	uint32_t total_len = 0;
+	dma_addr_t nvram_dma;
+	uint8_t *nvram = NULL;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	/* Only 40xx adapters are supported */
+	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	len = bsg_job->reply_payload.payload_len;
+	total_len = offset + len;
+
+	/* total len should not be greater than max NVRAM size */
+	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+	    ((is_qla4022(ha) || is_qla4032(ha)) &&
+	     total_len > QL40X2_NVRAM_SIZE)) {
+		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+			   " nvram size, offset=%d len=%d\n",
+			   __func__, offset, len);
+		goto leave;
+	}
+
+	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+				   GFP_KERNEL);
+	if (!nvram) {
+		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
+			   "data\n", __func__);
+		rval = -ENOMEM;
+		goto leave;
+	}
+
+	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else {
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+					    bsg_job->reply_payload.sg_cnt,
+					    nvram, len);
+		bsg_reply->result = DID_OK << 16;
+	}
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+	return rval;
+}
+
+static int
+qla4xxx_update_nvram(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint32_t offset = 0;
+	uint32_t len = 0;
+	uint32_t total_len = 0;
+	dma_addr_t nvram_dma;
+	uint8_t *nvram = NULL;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	len = bsg_job->request_payload.payload_len;
+	total_len = offset + len;
+
+	/* total len should not be greater than max NVRAM size */
+	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+	    ((is_qla4022(ha) || is_qla4032(ha)) &&
+	     total_len > QL40X2_NVRAM_SIZE)) {
+		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+			   " nvram size, offset=%d len=%d\n",
+			   __func__, offset, len);
+		goto leave;
+	}
+
+	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+				   GFP_KERNEL);
+	if (!nvram) {
+		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+			   "data\n", __func__);
+		rval = -ENOMEM;
+		goto leave;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+			  bsg_job->request_payload.sg_cnt, nvram, len);
+
+	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else
+		bsg_reply->result = DID_OK << 16;
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+	return rval;
+}
+
+static int
+qla4xxx_restore_defaults(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint32_t region = 0;
+	uint32_t field0 = 0;
+	uint32_t field1 = 0;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	if (is_qla4010(ha))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+
+	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else
+		bsg_reply->result = DID_OK << 16;
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+leave:
+	return rval;
+}
+
+static int
+qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint32_t acb_type = 0;
+	uint32_t len = 0;
+	dma_addr_t acb_dma;
+	uint8_t *acb = NULL;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	/* Only 4022 and above adapters are supported */
+	if (is_qla4010(ha))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	len = bsg_job->reply_payload.payload_len;
+	if (len < sizeof(struct addr_ctrl_blk)) {
+		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
+			   __func__, len);
+		rval = -EINVAL;
+		goto leave;
+	}
+
+	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
+	if (!acb) {
+		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
+			   "data\n", __func__);
+		rval = -ENOMEM;
+		goto leave;
+	}
+
+	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else {
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+					    bsg_job->reply_payload.sg_cnt,
+					    acb, len);
+		bsg_reply->result = DID_OK << 16;
+	}
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
+leave:
+	return rval;
+}
+
+static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint8_t *rsp_ptr = NULL;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_ERROR;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
+			   __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		goto exit_diag_mem_test;
+	}
+
+	bsg_reply->reply_payload_rcv_len = 0;
+	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
+	       sizeof(uint32_t) * MBOX_REG_COUNT);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
+			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
+			  mbox_cmd[7]));
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+					 &mbox_sts[0]);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
+			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
+			  mbox_sts[7]));
+
+	if (status == QLA_SUCCESS)
+		bsg_reply->result = DID_OK << 16;
+	else
+		bsg_reply->result = DID_ERROR << 16;
+
+	/* Send mbox_sts to application */
+	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
+	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
+	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
+
+exit_diag_mem_test:
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: bsg_reply->result = x%x, status = %s\n",
+			  __func__, bsg_reply->result, STATUS(status)));
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+}
+
+static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
+						   int wait_for_link)
+{
+	int status = QLA_SUCCESS;
+
+	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
+		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
+			   __func__, ha->idc_extend_tmo);
+		if (ha->idc_extend_tmo) {
+			if (!wait_for_completion_timeout(&ha->idc_comp,
+						(ha->idc_extend_tmo * HZ))) {
+				ha->notify_idc_comp = 0;
+				ha->notify_link_up_comp = 0;
+				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
+					   __func__);
+				status = QLA_ERROR;
+				goto exit_wait;
+			} else {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "%s: IDC Complete notification received\n",
+						  __func__));
+			}
+		}
+	} else {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: IDC Complete notification received\n",
+				  __func__));
+	}
+	ha->notify_idc_comp = 0;
+
+	if (wait_for_link) {
+		if (!wait_for_completion_timeout(&ha->link_up_comp,
+						 (IDC_COMP_TOV * HZ))) {
+			ha->notify_link_up_comp = 0;
+			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
+				   __func__);
+			status = QLA_ERROR;
+			goto exit_wait;
+		} else {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: LINK UP notification received\n",
+					  __func__));
+		}
+		ha->notify_link_up_comp = 0;
+	}
+
+exit_wait:
+	return status;
+}
+
+static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
+					 uint32_t *mbox_cmd)
+{
+	uint32_t config = 0;
+	int status = QLA_SUCCESS;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+	status = qla4_83xx_get_port_config(ha, &config);
+	if (status != QLA_SUCCESS)
+		goto exit_pre_loopback_config;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
+			  __func__, config));
+
+	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
+	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
+		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
+			   __func__);
+		goto exit_pre_loopback_config;
+	}
+
+	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
+		config |= ENABLE_INTERNAL_LOOPBACK;
+
+	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
+		config |= ENABLE_EXTERNAL_LOOPBACK;
+
+	config &= ~ENABLE_DCBX;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
+			  __func__, config));
+
+	ha->notify_idc_comp = 1;
+	ha->notify_link_up_comp = 1;
+
+	/* get the link state */
+	qla4xxx_get_firmware_state(ha);
+
+	status = qla4_83xx_set_port_config(ha, &config);
+	if (status != QLA_SUCCESS) {
+		ha->notify_idc_comp = 0;
+		ha->notify_link_up_comp = 0;
+		goto exit_pre_loopback_config;
+	}
+exit_pre_loopback_config:
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
+			  STATUS(status)));
+	return status;
+}
+
+static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
+					  uint32_t *mbox_cmd)
+{
+	int status = QLA_SUCCESS;
+	uint32_t config = 0;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+	status = qla4_83xx_get_port_config(ha, &config);
+	if (status != QLA_SUCCESS)
+		goto exit_post_loopback_config;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
+			  config));
+
+	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
+		config &= ~ENABLE_INTERNAL_LOOPBACK;
+	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
+		config &= ~ENABLE_EXTERNAL_LOOPBACK;
+
+	config |= ENABLE_DCBX;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: Restore default port config=%08X\n", __func__,
+			  config));
+
+	ha->notify_idc_comp = 1;
+	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
+		ha->notify_link_up_comp = 1;
+
+	status = qla4_83xx_set_port_config(ha, &config);
+	if (status != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
+			   __func__);
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		clear_bit(AF_LOOPBACK, &ha->flags);
+		goto exit_post_loopback_config;
+	}
+
+exit_post_loopback_config:
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
+			  STATUS(status)));
+	return status;
+}
+
+static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint8_t *rsp_ptr = NULL;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int wait_for_link = 1;
+	int status = QLA_ERROR;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (test_bit(AF_LOOPBACK, &ha->flags)) {
+		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
+			   __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		goto exit_loopback_cmd;
+	}
+
+	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
+			   __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		goto exit_loopback_cmd;
+	}
+
+	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
+	       sizeof(uint32_t) * MBOX_REG_COUNT);
+
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
+		if (status != QLA_SUCCESS) {
+			bsg_reply->result = DID_ERROR << 16;
+			goto exit_loopback_cmd;
+		}
+
+		status = qla4_83xx_wait_for_loopback_config_comp(ha,
+								 wait_for_link);
+		if (status != QLA_SUCCESS) {
+			bsg_reply->result = DID_TIME_OUT << 16;
+			goto restore;
+		}
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
+			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
+			  mbox_cmd[7]));
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+				&mbox_sts[0]);
+
+	if (status == QLA_SUCCESS)
+		bsg_reply->result = DID_OK << 16;
+	else
+		bsg_reply->result = DID_ERROR << 16;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
+			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
+			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
+			  mbox_sts[7]));
+
+	/* Send mbox_sts to application */
+	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
+	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
+	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));
+restore:
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
+		if (status != QLA_SUCCESS) {
+			bsg_reply->result = DID_ERROR << 16;
+			goto exit_loopback_cmd;
+		}
+
+		/* After restoring the port config, wait for LINK UP
+		 * only if the PHY link is up */
+		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
+			wait_for_link = 0;
+
+		status = qla4_83xx_wait_for_loopback_config_comp(ha,
+								 wait_for_link);
+		if (status != QLA_SUCCESS) {
+			bsg_reply->result = DID_TIME_OUT << 16;
+			goto exit_loopback_cmd;
+		}
+	}
+exit_loopback_cmd:
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: bsg_reply->result = x%x, status = %s\n",
+			  __func__, bsg_reply->result, STATUS(status)));
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+}
+
+static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	uint32_t diag_cmd;
+	int rval = -EINVAL;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));
+
+	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	if (diag_cmd == MBOX_CMD_DIAG_TEST) {
+		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
+		case QL_DIAG_CMD_TEST_DDR_SIZE:
+		case QL_DIAG_CMD_TEST_DDR_RW:
+		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
+		case QL_DIAG_CMD_TEST_NVRAM:
+		case QL_DIAG_CMD_TEST_FLASH_ROM:
+		case QL_DIAG_CMD_TEST_DMA_XFER:
+		case QL_DIAG_CMD_SELF_DDR_RW:
+		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
+			/* Execute diag test for adapter RAM/FLASH */
+			ql4xxx_execute_diag_cmd(bsg_job);
+			/* Always return success as we want to send
+			 * bsg_reply to the application */
+			rval = QLA_SUCCESS;
+			break;
+
+		case QL_DIAG_CMD_TEST_INT_LOOPBACK:
+		case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
+			/* Execute diag test for Network */
+			qla4xxx_execute_diag_loopback_cmd(bsg_job);
+			/* Always return success as we want to send
+			 * bsg_reply to the application */
+			rval = QLA_SUCCESS;
+			break;
+		default:
+			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
+				   __func__,
+				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
+		}
+	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
+		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
+		ql4xxx_execute_diag_cmd(bsg_job);
+		rval = QLA_SUCCESS;
+	} else {
+		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
+			   __func__, diag_cmd);
+	}
+
+	return rval;
+}
+
+/**
+ * qla4xxx_process_vendor_specific - handle vendor specific bsg request
+ * @job: iscsi_bsg_job to handle
+ **/
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
+{
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+
+	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+	case QLISCSI_VND_READ_FLASH:
+		return qla4xxx_read_flash(bsg_job);
+
+	case QLISCSI_VND_UPDATE_FLASH:
+		return qla4xxx_update_flash(bsg_job);
+
+	case QLISCSI_VND_GET_ACB_STATE:
+		return qla4xxx_get_acb_state(bsg_job);
+
+	case QLISCSI_VND_READ_NVRAM:
+		return qla4xxx_read_nvram(bsg_job);
+
+	case QLISCSI_VND_UPDATE_NVRAM:
+		return qla4xxx_update_nvram(bsg_job);
+
+	case QLISCSI_VND_RESTORE_DEFAULTS:
+		return qla4xxx_restore_defaults(bsg_job);
+
+	case QLISCSI_VND_GET_ACB:
+		return qla4xxx_bsg_get_acb(bsg_job);
+
+	case QLISCSI_VND_DIAG_TEST:
+		return qla4xxx_execute_diag_test(bsg_job);
+
+	default:
+		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
+			   "0x%x\n", __func__, bsg_req->msgcode);
+		bsg_reply->result = (DID_ERROR << 16);
+		bsg_reply->reply_payload_rcv_len = 0;
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+		return -ENOSYS;
+	}
+}
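+
+/*
+ * A vendor request carries its opcode in vendor_cmd[0] and arguments in
+ * the following words, as consumed by the handlers above.  For example,
+ * QLISCSI_VND_READ_FLASH expects the flash offset in vendor_cmd[1] and
+ * takes the read length from the reply payload length.
+ */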
+
+/**
+ * qla4xxx_bsg_request - handle bsg request from ISCSI transport
+ * @job: iscsi_bsg_job to handle
+ */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job)
+{
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+
+	switch (bsg_req->msgcode) {
+	case ISCSI_BSG_HST_VENDOR:
+		return qla4xxx_process_vendor_specific(bsg_job);
+
+	default:
+		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
+			   __func__, bsg_req->msgcode);
+	}
+
+	return -ENOSYS;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_bsg.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_bsg.h
new file mode 100644
index 0000000..88c2401
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -0,0 +1,32 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QL4_BSG_H
+#define __QL4_BSG_H
+
+/* BSG Vendor specific commands */
+#define QLISCSI_VND_READ_FLASH		1
+#define QLISCSI_VND_UPDATE_FLASH	2
+#define QLISCSI_VND_GET_ACB_STATE	3
+#define QLISCSI_VND_READ_NVRAM		4
+#define QLISCSI_VND_UPDATE_NVRAM	5
+#define QLISCSI_VND_RESTORE_DEFAULTS	6
+#define QLISCSI_VND_GET_ACB		7
+#define QLISCSI_VND_DIAG_TEST		8
+
+/* QLISCSI_VND_DIAG_TEST sub codes */
+#define QL_DIAG_CMD_TEST_DDR_SIZE	0x2
+#define QL_DIAG_CMD_TEST_DDR_RW		0x3
+#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW	0x4
+#define QL_DIAG_CMD_TEST_NVRAM		0x5	/* Only ISP4XXX */
+#define QL_DIAG_CMD_TEST_FLASH_ROM	0x6
+#define QL_DIAG_CMD_TEST_INT_LOOPBACK	0x7
+#define QL_DIAG_CMD_TEST_EXT_LOOPBACK	0x8
+#define QL_DIAG_CMD_TEST_DMA_XFER	0x9	/* Only ISP4XXX */
+#define QL_DIAG_CMD_SELF_DDR_RW		0xC
+#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW	0xD
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_dbg.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_dbg.c
new file mode 100644
index 0000000..5649e9e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -0,0 +1,162 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+void qla4xxx_dump_buffer(void *b, uint32_t size)
+{
+	uint32_t cnt;
+	uint8_t *c = b;
+
+	printk(" 0   1   2   3   4   5   6   7   8   9  Ah  Bh  Ch  Dh  Eh  "
+	       "Fh\n");
+	printk("------------------------------------------------------------"
+	       "--\n");
+	for (cnt = 0; cnt < size; c++) {
+		printk("%02x", *c);
+		if (!(++cnt % 16))
+			printk("\n");
+		else
+			printk("  ");
+	}
+	printk(KERN_INFO "\n");
+}
+
+void qla4xxx_dump_registers(struct scsi_qla_host *ha)
+{
+	uint8_t i;
+
+	if (is_qla8022(ha)) {
+		for (i = 1; i < MBOX_REG_COUNT; i++)
+			printk(KERN_INFO "mailbox[%d]     = 0x%08X\n",
+			    i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
+		return;
+	}
+
+	for (i = 0; i < MBOX_REG_COUNT; i++) {
+		printk(KERN_INFO "0x%02X mailbox[%d]      = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
+		    readw(&ha->reg->mailbox[i]));
+	}
+
+	printk(KERN_INFO "0x%02X flash_address            = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, flash_address),
+	    readw(&ha->reg->flash_address));
+	printk(KERN_INFO "0x%02X flash_data               = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, flash_data),
+	    readw(&ha->reg->flash_data));
+	printk(KERN_INFO "0x%02X ctrl_status              = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, ctrl_status),
+	    readw(&ha->reg->ctrl_status));
+
+	if (is_qla4010(ha)) {
+		printk(KERN_INFO "0x%02X nvram            = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
+		    readw(&ha->reg->u1.isp4010.nvram));
+	} else if (is_qla4022(ha) || is_qla4032(ha)) {
+		printk(KERN_INFO "0x%02X intr_mask        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
+		    readw(&ha->reg->u1.isp4022.intr_mask));
+		printk(KERN_INFO "0x%02X nvram            = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
+		    readw(&ha->reg->u1.isp4022.nvram));
+		printk(KERN_INFO "0x%02X semaphore	  = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
+		    readw(&ha->reg->u1.isp4022.semaphore));
+	}
+	printk(KERN_INFO "0x%02X req_q_in                 = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, req_q_in),
+	    readw(&ha->reg->req_q_in));
+	printk(KERN_INFO "0x%02X rsp_q_out                = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, rsp_q_out),
+	    readw(&ha->reg->rsp_q_out));
+
+	if (is_qla4010(ha)) {
+		printk(KERN_INFO "0x%02X ext_hw_conf      = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
+		    readw(&ha->reg->u2.isp4010.ext_hw_conf));
+		printk(KERN_INFO "0x%02X port_ctrl        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
+		    readw(&ha->reg->u2.isp4010.port_ctrl));
+		printk(KERN_INFO "0x%02X port_status      = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
+		    readw(&ha->reg->u2.isp4010.port_status));
+		printk(KERN_INFO "0x%02X req_q_out        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
+		    readw(&ha->reg->u2.isp4010.req_q_out));
+		printk(KERN_INFO "0x%02X gp_out           = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
+		    readw(&ha->reg->u2.isp4010.gp_out));
+		printk(KERN_INFO "0x%02X gp_in	          = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
+		    readw(&ha->reg->u2.isp4010.gp_in));
+		printk(KERN_INFO "0x%02X port_err_status  = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4010.port_err_status),
+		    readw(&ha->reg->u2.isp4010.port_err_status));
+	} else if (is_qla4022(ha) || is_qla4032(ha)) {
+		printk(KERN_INFO "Page 0 Registers:\n");
+		printk(KERN_INFO "0x%02X ext_hw_conf      = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
+		    readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
+		printk(KERN_INFO "0x%02X port_ctrl        = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
+		    readw(&ha->reg->u2.isp4022.p0.port_ctrl));
+		printk(KERN_INFO "0x%02X port_status      = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.port_status),
+		    readw(&ha->reg->u2.isp4022.p0.port_status));
+		printk(KERN_INFO "0x%02X gp_out           = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
+		    readw(&ha->reg->u2.isp4022.p0.gp_out));
+		printk(KERN_INFO "0x%02X gp_in            = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
+		    readw(&ha->reg->u2.isp4022.p0.gp_in));
+		printk(KERN_INFO "0x%02X port_err_status  = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
+		    readw(&ha->reg->u2.isp4022.p0.port_err_status));
+		printk(KERN_INFO "Page 1 Registers:\n");
+		writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+		    &ha->reg->ctrl_status);
+		printk(KERN_INFO "0x%02X req_q_out        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
+		    readw(&ha->reg->u2.isp4022.p1.req_q_out));
+		writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+		    &ha->reg->ctrl_status);
+	}
+}
+
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
+{
+	uint32_t halt_status1, halt_status2;
+
+	halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+	halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);
+
+	if (is_qla8022(ha)) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
+			   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+			   " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+			   " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+			   " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__,
+			   ha->pdev->device, halt_status1, halt_status2,
+			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
+			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
+			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
+			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
+			   qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
+			   " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
+			   ha->host_no, __func__, ha->pdev->device,
+			   halt_status1, halt_status2);
+	}
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_dbg.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_dbg.h
new file mode 100644
index 0000000..51c365b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -0,0 +1,62 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2012 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ * Driver debug definitions.
+ */
+/* #define QL_DEBUG  */			/* DEBUG messages */
+/* #define QL_DEBUG_LEVEL_3  */		/* Output function tracing */
+/* #define QL_DEBUG_LEVEL_4  */
+/* #define QL_DEBUG_LEVEL_5  */
+/* #define QL_DEBUG_LEVEL_7  */
+/* #define QL_DEBUG_LEVEL_9  */
+
+#define QL_DEBUG_LEVEL_2	/* Always enable error messages */
+#if defined(QL_DEBUG)
+#define DEBUG(x)   do {x;} while (0);
+#else
+#define DEBUG(x)	do {} while (0);
+#endif
+
+#if defined(QL_DEBUG_LEVEL_2)
+#define DEBUG2(x)	do { if (ql4xextended_error_logging == 2) x; } while (0);
+#define DEBUG2_3(x)   do {x;} while (0);
+#else				/*  */
+#define DEBUG2(x)	do {} while (0);
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_3)
+#define DEBUG3(x)	do { if (ql4xextended_error_logging == 3) x; } while (0);
+#else				/*  */
+#define DEBUG3(x)	do {} while (0);
+#if !defined(QL_DEBUG_LEVEL_2)
+#define DEBUG2_3(x)	do {} while (0);
+#endif				/*  */
+#endif				/*  */
+#if defined(QL_DEBUG_LEVEL_4)
+#define DEBUG4(x)	do {x;} while (0);
+#else				/*  */
+#define DEBUG4(x)	do {} while (0);
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_5)
+#define DEBUG5(x)	do {x;} while (0);
+#else				/*  */
+#define DEBUG5(x)	do {} while (0);
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_7)
+#define DEBUG7(x)	do {x; } while (0)
+#else				/*  */
+#define DEBUG7(x)	do {} while (0)
+#endif				/*  */
+
+#if defined(QL_DEBUG_LEVEL_9)
+#define DEBUG9(x)	do {x;} while (0);
+#else				/*  */
+#define DEBUG9(x)	do {} while (0);
+#endif				/*  */
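+
+/*
+ * DEBUG2 and DEBUG3 are additionally gated at runtime by the
+ * ql4xextended_error_logging module parameter, so the driver can be
+ * made verbose without rebuilding, e.g.:
+ *
+ *	modprobe qla4xxx ql4xextended_error_logging=2
+ */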
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_def.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_def.h
new file mode 100644
index 0000000..817f312
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_def.h
@@ -0,0 +1,1076 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QL4_DEF_H
+#define __QL4_DEF_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/aer.h>
+#include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
+
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
+#include <scsi/libiscsi.h>
+
+#include "ql4_dbg.h"
+#include "ql4_nx.h"
+#include "ql4_fw.h"
+#include "ql4_nvram.h"
+#include "ql4_83xx.h"
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
+#define PCI_DEVICE_ID_QLOGIC_ISP4010	0x4010
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
+#define PCI_DEVICE_ID_QLOGIC_ISP4022	0x4022
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
+#define PCI_DEVICE_ID_QLOGIC_ISP4032	0x4032
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8022
+#define PCI_DEVICE_ID_QLOGIC_ISP8022	0x8022
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324
+#define PCI_DEVICE_ID_QLOGIC_ISP8324	0x8032
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042
+#define PCI_DEVICE_ID_QLOGIC_ISP8042	0x8042
+#endif
+
+#define ISP4XXX_PCI_FN_1	0x1
+#define ISP4XXX_PCI_FN_2	0x3
+
+#define QLA_SUCCESS			0
+#define QLA_ERROR			1
+#define STATUS(status)		((status) == QLA_ERROR ? "FAILED" : "SUCCEEDED")
+
+/*
+ * Data bit definitions
+ */
+#define BIT_0	0x1
+#define BIT_1	0x2
+#define BIT_2	0x4
+#define BIT_3	0x8
+#define BIT_4	0x10
+#define BIT_5	0x20
+#define BIT_6	0x40
+#define BIT_7	0x80
+#define BIT_8	0x100
+#define BIT_9	0x200
+#define BIT_10	0x400
+#define BIT_11	0x800
+#define BIT_12	0x1000
+#define BIT_13	0x2000
+#define BIT_14	0x4000
+#define BIT_15	0x8000
+#define BIT_16	0x10000
+#define BIT_17	0x20000
+#define BIT_18	0x40000
+#define BIT_19	0x80000
+#define BIT_20	0x100000
+#define BIT_21	0x200000
+#define BIT_22	0x400000
+#define BIT_23	0x800000
+#define BIT_24	0x1000000
+#define BIT_25	0x2000000
+#define BIT_26	0x4000000
+#define BIT_27	0x8000000
+#define BIT_28	0x10000000
+#define BIT_29	0x20000000
+#define BIT_30	0x40000000
+#define BIT_31	0x80000000
+
+/*
+ * Helper macros for coding and maintenance.
+ */
+#define ql4_printk(level, ha, format, arg...) \
+	dev_printk(level, &((ha)->pdev->dev), format, ## arg)
+
+
+/*
+ * Host adapter default definitions
+ */
+#define MAX_HBAS		16
+#define MAX_BUSES		1
+#define MAX_TARGETS		MAX_DEV_DB_ENTRIES
+#define MAX_LUNS		0xffff
+#define MAX_AEN_ENTRIES		MAX_DEV_DB_ENTRIES
+#define MAX_DDB_ENTRIES		MAX_DEV_DB_ENTRIES
+#define MAX_PDU_ENTRIES		32
+#define INVALID_ENTRY		0xFFFF
+#define MAX_CMDS_TO_RISC	1024
+#define MAX_SRBS		MAX_CMDS_TO_RISC
+#define MBOX_AEN_REG_COUNT	8
+#define MAX_INIT_RETRIES	5
+
+/*
+ * Buffer sizes
+ */
+#define REQUEST_QUEUE_DEPTH		MAX_CMDS_TO_RISC
+#define RESPONSE_QUEUE_DEPTH		64
+#define QUEUE_SIZE			64
+#define DMA_BUFFER_SIZE			512
+#define IOCB_HIWAT_CUSHION		4
+
+/*
+ * Misc
+ */
+#define MAC_ADDR_LEN			6	/* in bytes */
+#define IP_ADDR_LEN			4	/* in bytes */
+#define IPv6_ADDR_LEN			16	/* IPv6 address size */
+#define DRIVER_NAME			"qla4xxx"
+
+#define MAX_LINKED_CMDS_PER_LUN		3
+#define MAX_REQS_SERVICED_PER_INTR	1
+
+#define ISCSI_IPADDR_SIZE		4	/* IP address size */
+#define ISCSI_ALIAS_SIZE		32	/* ISCSI Alias name size */
+#define ISCSI_NAME_SIZE			0xE0	/* ISCSI Name size */
+
+#define QL4_SESS_RECOVERY_TMO		120	/* iSCSI session */
+						/* recovery timeout */
+
+#define LSDW(x) ((u32)((u64)(x)))
+#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
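+
+/*
+ * LSDW/MSDW split a 64-bit value (typically a DMA address) into its
+ * low and high 32-bit dwords; the two-step 16-bit shift in MSDW is the
+ * usual idiom for avoiding shift-count warnings when the underlying
+ * type can be 32 bits wide.
+ */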
+
+#define DEV_DB_NON_PERSISTENT	0
+#define DEV_DB_PERSISTENT	1
+
+#define QL4_ISP_REG_DISCONNECT 0xffffffffU
+
+#define COPY_ISID(dst_isid, src_isid) {			\
+	int i, j;					\
+	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)	\
+		dst_isid[i++] = src_isid[j--];		\
+}
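+
+/*
+ * COPY_ISID copies the 6-byte ISID with the byte order reversed,
+ * presumably because the firmware and the iSCSI transport store it in
+ * opposite endianness.
+ */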
+
+#define SET_BITVAL(o, n, v) {	\
+	if (o)			\
+		n |= v;		\
+	else			\
+		n &= ~v;	\
+}
+
+#define OP_STATE(o, f, p) {			\
+	p = (o & f) ? "enable" : "disable";	\
+}
+
+/*
+ * Retry & Timeout Values
+ */
+#define MBOX_TOV			60
+#define SOFT_RESET_TOV			30
+#define RESET_INTR_TOV			3
+#define SEMAPHORE_TOV			10
+#define ADAPTER_INIT_TOV		30
+#define ADAPTER_RESET_TOV		180
+#define EXTEND_CMD_TOV			60
+#define WAIT_CMD_TOV			5
+#define EH_WAIT_CMD_TOV			120
+#define FIRMWARE_UP_TOV			60
+#define RESET_FIRMWARE_TOV		30
+#define LOGOUT_TOV			10
+#define IOCB_TOV_MARGIN			10
+#define RELOGIN_TOV			18
+#define ISNS_DEREG_TOV			5
+#define HBA_ONLINE_TOV			30
+#define DISABLE_ACB_TOV			30
+#define IP_CONFIG_TOV			30
+#define LOGIN_TOV			12
+#define BOOT_LOGIN_RESP_TOV		60
+
+#define MAX_RESET_HA_RETRIES		2
+#define FW_ALIVE_WAIT_TOV		3
+#define IDC_EXTEND_TOV			8
+#define IDC_COMP_TOV			5
+#define LINK_UP_COMP_TOV		30
+
+#define CMD_SP(Cmnd)			((Cmnd)->SCp.ptr)
+
+/*
+ * SCSI Request Block (srb) structure that is placed in the cmd->SCp
+ * location of every I/O [we have 22 bytes available]
+ */
+struct srb {
+	struct list_head list;	/* (8)	 */
+	struct scsi_qla_host *ha;	/* HA the SP is queued on */
+	struct ddb_entry *ddb;
+	uint16_t flags;		/* (1) Status flags. */
+
+#define SRB_DMA_VALID		BIT_3	/* DMA Buffer mapped. */
+#define SRB_GOT_SENSE		BIT_4	/* sense data received. */
+	uint8_t state;		/* (1) Status flags. */
+
+#define SRB_NO_QUEUE_STATE	 0	/* Request is in between states */
+#define SRB_FREE_STATE		 1
+#define SRB_ACTIVE_STATE	 3
+#define SRB_ACTIVE_TIMEOUT_STATE 4
+#define SRB_SUSPENDED_STATE	 7	/* Request in suspended state */
+
+	struct scsi_cmnd *cmd;	/* (4) SCSI command block */
+	dma_addr_t dma_handle;	/* (4) for unmap of single transfers */
+	struct kref srb_ref;	/* reference count for this srb */
+	uint8_t err_id;		/* error id */
+#define SRB_ERR_PORT	   1	/* Request failed because "port down" */
+#define SRB_ERR_LOOP	   2	/* Request failed because "loop down" */
+#define SRB_ERR_DEVICE	   3	/* Request failed because "device error" */
+#define SRB_ERR_OTHER	   4
+
+	uint16_t reserved;
+	uint16_t iocb_tov;
+	uint16_t iocb_cnt;	/* Number of used iocbs */
+	uint16_t cc_stat;
+
+	/* Used for extended sense / status continuation */
+	uint8_t *req_sense_ptr;
+	uint16_t req_sense_len;
+	uint16_t reserved2;
+};
+
+/* Mailbox request block structure */
+struct mrb {
+	struct scsi_qla_host *ha;
+	struct mbox_cmd_iocb *mbox;
+	uint32_t mbox_cmd;
+	uint16_t iocb_cnt;		/* Number of used iocbs */
+	uint32_t pid;
+};
+
+/*
+ * Asynchronous Event Queue structure
+ */
+struct aen {
+	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+};
+
+struct ql4_aen_log {
+	int count;
+	struct aen entry[MAX_AEN_ENTRIES];
+};
+
+/*
+ * Device Database (DDB) structure
+ */
+struct ddb_entry {
+	struct scsi_qla_host *ha;
+	struct iscsi_cls_session *sess;
+	struct iscsi_cls_conn *conn;
+
+	uint16_t fw_ddb_index;	/* DDB firmware index */
+	uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+	uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+	struct dev_db_entry fw_ddb_entry;
+	int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+	int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			  struct ddb_entry *ddb_entry, uint32_t state);
+
+	/* Driver Re-login  */
+	unsigned long flags;		  /* DDB Flags */
+#define DDB_CONN_CLOSE_FAILURE		0 /* 0x00000001 */
+
+	uint16_t default_relogin_timeout; /*  Max time to wait for
+					   *  relogin to complete */
+	atomic_t retry_relogin_timer;	  /* Min Time between relogins
+					   * (4000 only) */
+	atomic_t relogin_timer;		  /* Max Time to wait for
+					   * relogin to complete */
+	atomic_t relogin_retry_count;	  /* Num of times relogin has been
+					   * retried */
+	uint32_t default_time2wait;	  /* Default Min time between
+					   * relogins (+aens) */
+	uint16_t chap_tbl_idx;
+};
+
+struct qla_ddb_index {
+	struct list_head list;
+	uint16_t fw_ddb_idx;
+	uint16_t flash_ddb_idx;
+	struct dev_db_entry fw_ddb;
+	uint8_t flash_isid[6];
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+	int port;
+	int tpgt;
+	char ip_addr[DDB_IPADDR_LEN];
+	char iscsi_name[ISCSI_NAME_SIZE];
+	uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
+	uint8_t isid[6];
+};
+
+/*
+ * DDB states.
+ */
+#define DDB_STATE_DEAD		0	/* We can no longer talk to
+					 * this device */
+#define DDB_STATE_ONLINE	1	/* Device ready to accept
+					 * commands */
+#define DDB_STATE_MISSING	2	/* Device logged off, trying
+					 * to re-login */
+
+/*
+ * DDB flags.
+ */
+#define DF_RELOGIN		0	/* Relogin to device */
+#define DF_BOOT_TGT		1	/* Boot target entry */
+#define DF_ISNS_DISCOVERED	2	/* Device was discovered via iSNS */
+#define DF_FO_MASKED		3
+#define DF_DISABLE_RELOGIN		4	/* Disable relogin to device */
+
+enum qla4_work_type {
+	QLA4_EVENT_AEN,
+	QLA4_EVENT_PING_STATUS,
+};
+
+struct qla4_work_evt {
+	struct list_head list;
+	enum qla4_work_type type;
+	union {
+		struct {
+			enum iscsi_host_event_code code;
+			uint32_t data_size;
+			uint8_t data[0];
+		} aen;
+		struct {
+			uint32_t status;
+			uint32_t pid;
+			uint32_t data_size;
+			uint8_t data[0];
+		} ping;
+	} u;
+};
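+
+/*
+ * Work events are variable-length: the trailing data[0] flexible
+ * array carries the payload, so a producer would typically allocate
+ * sizeof(struct qla4_work_evt) + data_size, fill in u.aen or u.ping,
+ * and chain the event on the host's work_list under work_lock
+ * (illustrative sketch of the idiom).
+ */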
+
+struct ql82xx_hw_data {
+	/* Offsets for flash/nvram access (set to ~0 if not used). */
+	uint32_t flash_conf_off;
+	uint32_t flash_data_off;
+
+	uint32_t fdt_wrt_disable;
+	uint32_t fdt_erase_cmd;
+	uint32_t fdt_block_size;
+	uint32_t fdt_unprotect_sec_cmd;
+	uint32_t fdt_protect_sec_cmd;
+
+	uint32_t flt_region_flt;
+	uint32_t flt_region_fdt;
+	uint32_t flt_region_boot;
+	uint32_t flt_region_bootload;
+	uint32_t flt_region_fw;
+
+	uint32_t flt_iscsi_param;
+	uint32_t flt_region_chap;
+	uint32_t flt_chap_size;
+	uint32_t flt_region_ddb;
+	uint32_t flt_ddb_size;
+};
+
+struct qla4_8xxx_legacy_intr_set {
+	uint32_t int_vec_bit;
+	uint32_t tgt_status_reg;
+	uint32_t tgt_mask_reg;
+	uint32_t pci_int_reg;
+};
+
+/* MSI-X Support */
+#define QLA_MSIX_ENTRIES	2
+
+/*
+ * ISP Operations
+ */
+struct isp_operations {
+	int (*iospace_config) (struct scsi_qla_host *ha);
+	void (*pci_config) (struct scsi_qla_host *);
+	void (*disable_intrs) (struct scsi_qla_host *);
+	void (*enable_intrs) (struct scsi_qla_host *);
+	int (*start_firmware) (struct scsi_qla_host *);
+	int (*restart_firmware) (struct scsi_qla_host *);
+	irqreturn_t (*intr_handler) (int , void *);
+	void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
+	int (*need_reset) (struct scsi_qla_host *);
+	int (*reset_chip) (struct scsi_qla_host *);
+	int (*reset_firmware) (struct scsi_qla_host *);
+	void (*queue_iocb) (struct scsi_qla_host *);
+	void (*complete_iocb) (struct scsi_qla_host *);
+	uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
+	uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
+	int (*get_sys_info) (struct scsi_qla_host *);
+	uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong);
+	void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
+	int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
+	int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
+	int (*idc_lock) (struct scsi_qla_host *);
+	void (*idc_unlock) (struct scsi_qla_host *);
+	void (*rom_lock_recovery) (struct scsi_qla_host *);
+	void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
+	void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
+};
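+
+/*
+ * isp_operations decouples the common core from the register and
+ * reset models of the 40xx, 82xx and 83xx families: probe code is
+ * expected to select an ops table based on the PCI device ID, after
+ * which the core only calls through the table, e.g. (illustrative)
+ *
+ *	ha->isp_ops->disable_intrs(ha);
+ */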
+
+struct ql4_mdump_size_table {
+	uint32_t size;
+	uint32_t size_cmask_02;
+	uint32_t size_cmask_04;
+	uint32_t size_cmask_08;
+	uint32_t size_cmask_10;
+	uint32_t size_cmask_FF;
+	uint32_t version;
+};
+
+/*qla4xxx ipaddress configuration details */
+struct ipaddress_config {
+	uint16_t ipv4_options;
+	uint16_t tcp_options;
+	uint16_t ipv4_vlan_tag;
+	uint8_t ipv4_addr_state;
+	uint8_t ip_address[IP_ADDR_LEN];
+	uint8_t subnet_mask[IP_ADDR_LEN];
+	uint8_t gateway[IP_ADDR_LEN];
+	uint32_t ipv6_options;
+	uint32_t ipv6_addl_options;
+	uint8_t ipv6_link_local_state;
+	uint8_t ipv6_addr0_state;
+	uint8_t ipv6_addr1_state;
+	uint8_t ipv6_default_router_state;
+	uint16_t ipv6_vlan_tag;
+	struct in6_addr ipv6_link_local_addr;
+	struct in6_addr ipv6_addr0;
+	struct in6_addr ipv6_addr1;
+	struct in6_addr ipv6_default_router_addr;
+	uint16_t eth_mtu_size;
+	uint16_t ipv4_port;
+	uint16_t ipv6_port;
+	uint8_t control;
+	uint16_t ipv6_tcp_options;
+	uint8_t tcp_wsf;
+	uint8_t ipv6_tcp_wsf;
+	uint8_t ipv4_tos;
+	uint8_t ipv4_cache_id;
+	uint8_t ipv6_cache_id;
+	uint8_t ipv4_alt_cid_len;
+	uint8_t ipv4_alt_cid[11];
+	uint8_t ipv4_vid_len;
+	uint8_t ipv4_vid[11];
+	uint8_t ipv4_ttl;
+	uint16_t ipv6_flow_lbl;
+	uint8_t ipv6_traffic_class;
+	uint8_t ipv6_hop_limit;
+	uint32_t ipv6_nd_reach_time;
+	uint32_t ipv6_nd_rexmit_timer;
+	uint32_t ipv6_nd_stale_timeout;
+	uint8_t ipv6_dup_addr_detect_count;
+	uint32_t ipv6_gw_advrt_mtu;
+	uint16_t def_timeout;
+	uint8_t abort_timer;
+	uint16_t iscsi_options;
+	uint16_t iscsi_max_pdu_size;
+	uint16_t iscsi_first_burst_len;
+	uint16_t iscsi_max_outstnd_r2t;
+	uint16_t iscsi_max_burst_len;
+	uint8_t iscsi_name[224];
+};
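+
+/*
+ * ipaddress_config is the driver-side cache of the interface's IP
+ * parameters; its fields mirror the firmware's address control block
+ * (struct addr_ctrl_blk in ql4_fw.h) and are presumably refreshed
+ * whenever the ACB is read back from the firmware.
+ */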
+
+#define QL4_CHAP_MAX_NAME_LEN 256
+#define QL4_CHAP_MAX_SECRET_LEN 100
+#define LOCAL_CHAP	0
+#define BIDI_CHAP	1
+
+struct ql4_chap_format {
+	u8  intr_chap_name[QL4_CHAP_MAX_NAME_LEN];
+	u8  intr_secret[QL4_CHAP_MAX_SECRET_LEN];
+	u8  target_chap_name[QL4_CHAP_MAX_NAME_LEN];
+	u8  target_secret[QL4_CHAP_MAX_SECRET_LEN];
+	u16 intr_chap_name_length;
+	u16 intr_secret_length;
+	u16 target_chap_name_length;
+	u16 target_secret_length;
+};
+
+struct ip_address_format {
+	u8 ip_type;
+	u8 ip_address[16];
+};
+
+struct	ql4_conn_info {
+	u16	dest_port;
+	struct	ip_address_format dest_ipaddr;
+	struct	ql4_chap_format chap;
+};
+
+struct ql4_boot_session_info {
+	u8	target_name[224];
+	struct	ql4_conn_info conn_list[1];
+};
+
+struct ql4_boot_tgt_info {
+	struct ql4_boot_session_info boot_pri_sess;
+	struct ql4_boot_session_info boot_sec_sess;
+};
+
+/*
+ * Linux Host Adapter structure
+ */
+struct scsi_qla_host {
+	/* Linux adapter configuration data */
+	unsigned long flags;
+
+#define AF_ONLINE			0 /* 0x00000001 */
+#define AF_INIT_DONE			1 /* 0x00000002 */
+#define AF_MBOX_COMMAND			2 /* 0x00000004 */
+#define AF_MBOX_COMMAND_DONE		3 /* 0x00000008 */
+#define AF_ST_DISCOVERY_IN_PROGRESS	4 /* 0x00000010 */
+#define AF_INTERRUPTS_ON		6 /* 0x00000040 */
+#define AF_GET_CRASH_RECORD		7 /* 0x00000080 */
+#define AF_LINK_UP			8 /* 0x00000100 */
+#define AF_LOOPBACK			9 /* 0x00000200 */
+#define AF_IRQ_ATTACHED			10 /* 0x00000400 */
+#define AF_DISABLE_ACB_COMPLETE		11 /* 0x00000800 */
+#define AF_HA_REMOVAL			12 /* 0x00001000 */
+#define AF_MBOX_COMMAND_NOPOLL		18 /* 0x00040000 */
+#define AF_FW_RECOVERY			19 /* 0x00080000 */
+#define AF_EEH_BUSY			20 /* 0x00100000 */
+#define AF_PCI_CHANNEL_IO_PERM_FAILURE	21 /* 0x00200000 */
+#define AF_BUILD_DDB_LIST		22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED		24 /* 0x01000000 */
+#define AF_8XXX_RST_OWNER		25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING		26 /* 0x04000000 */
+#define AF_83XX_IOCB_INTR_ON		28 /* 0x10000000 */
+#define AF_83XX_MBOX_INTR_ON		29 /* 0x20000000 */
+
+	unsigned long dpc_flags;
+
+#define DPC_RESET_HA			1 /* 0x00000002 */
+#define DPC_RETRY_RESET_HA		2 /* 0x00000004 */
+#define DPC_RELOGIN_DEVICE		3 /* 0x00000008 */
+#define DPC_RESET_HA_FW_CONTEXT		4 /* 0x00000010 */
+#define DPC_RESET_HA_INTR		5 /* 0x00000020 */
+#define DPC_ISNS_RESTART		7 /* 0x00000080 */
+#define DPC_AEN				9 /* 0x00000200 */
+#define DPC_GET_DHCP_IP_ADDR		15 /* 0x00008000 */
+#define DPC_LINK_CHANGED		18 /* 0x00040000 */
+#define DPC_RESET_ACTIVE		20 /* 0x00100000 */
+#define DPC_HA_UNRECOVERABLE		21 /* 0x00200000 ISP-82xx only*/
+#define DPC_HA_NEED_QUIESCENT		22 /* 0x00400000 ISP-82xx only*/
+#define DPC_POST_IDC_ACK		23 /* 0x00800000 */
+#define DPC_RESTORE_ACB			24 /* 0x01000000 */
+#define DPC_SYSFS_DDB_EXPORT		25 /* 0x02000000 */
+
+	struct Scsi_Host *host; /* pointer to host data */
+	uint32_t tot_ddbs;
+
+	uint16_t iocb_cnt;
+	uint16_t iocb_hiwat;
+
+	/* SRB cache. */
+#define SRB_MIN_REQ	128
+	mempool_t *srb_mempool;
+
+	/* pci information */
+	struct pci_dev *pdev;
+
+	struct isp_reg __iomem *reg; /* Base I/O address */
+	unsigned long pio_address;
+	unsigned long pio_length;
+#define MIN_IOBASE_LEN		0x100
+
+	uint16_t req_q_count;
+
+	unsigned long host_no;
+
+	/* NVRAM registers */
+	struct eeprom_data *nvram;
+	spinlock_t hardware_lock ____cacheline_aligned;
+	uint32_t eeprom_cmd_data;
+
+	/* Counters for general statistics */
+	uint64_t isr_count;
+	uint64_t adapter_error_count;
+	uint64_t device_error_count;
+	uint64_t total_io_count;
+	uint64_t total_mbytes_xferred;
+	uint64_t link_failure_count;
+	uint64_t invalid_crc_count;
+	uint32_t bytes_xfered;
+	uint32_t spurious_int_count;
+	uint32_t aborted_io_count;
+	uint32_t io_timeout_count;
+	uint32_t mailbox_timeout_count;
+	uint32_t seconds_since_last_intr;
+	uint32_t seconds_since_last_heartbeat;
+	uint32_t mac_index;
+
+	/* Info Needed for Management App */
+	/* --- From GetFwVersion --- */
+	uint32_t firmware_version[2];
+	uint32_t patch_number;
+	uint32_t build_number;
+	uint32_t board_id;
+
+	/* --- From Init_FW --- */
+	/* init_cb_t *init_cb; */
+	uint16_t firmware_options;
+	uint8_t alias[32];
+	uint8_t name_string[256];
+	uint8_t heartbeat_interval;
+
+	/* --- From FlashSysInfo --- */
+	uint8_t my_mac[MAC_ADDR_LEN];
+	uint8_t serial_number[16];
+	uint16_t port_num;
+	/* --- From GetFwState --- */
+	uint32_t firmware_state;
+	uint32_t addl_fw_state;
+
+	/* Linux kernel thread */
+	struct workqueue_struct *dpc_thread;
+	struct work_struct dpc_work;
+
+	/* Linux timer thread */
+	struct timer_list timer;
+	uint32_t timer_active;
+
+	/* Recovery Timers */
+	atomic_t check_relogin_timeouts;
+	uint32_t retry_reset_ha_cnt;
+	uint32_t isp_reset_timer;	/* reset test timer */
+	uint32_t nic_reset_timer;	/* simulated nic reset test timer */
+	int eh_start;
+	struct list_head free_srb_q;
+	uint16_t free_srb_q_count;
+	uint16_t num_srbs_allocated;
+
+	/* DMA Memory Block */
+	void *queues;
+	dma_addr_t queues_dma;
+	unsigned long queues_len;
+
+#define MEM_ALIGN_VALUE \
+	    ((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
+	     sizeof(struct queue_entry))
+	/* request and response queue variables */
+	dma_addr_t request_dma;
+	struct queue_entry *request_ring;
+	struct queue_entry *request_ptr;
+	dma_addr_t response_dma;
+	struct queue_entry *response_ring;
+	struct queue_entry *response_ptr;
+	dma_addr_t shadow_regs_dma;
+	struct shadow_regs *shadow_regs;
+	uint16_t request_in;	/* Current indexes. */
+	uint16_t request_out;
+	uint16_t response_in;
+	uint16_t response_out;
+
+	/* aen queue variables */
+	uint16_t aen_q_count;	/* Number of available aen_q entries */
+	uint16_t aen_in;	/* Current indexes */
+	uint16_t aen_out;
+	struct aen aen_q[MAX_AEN_ENTRIES];
+
+	struct ql4_aen_log aen_log;/* tracks all aens */
+
+	/* This mutex serializes mailbox commands issued by
+	 * several threads.
+	 */
+	struct mutex  mbox_sem;
+
+	/* temporary mailbox status registers */
+	volatile uint8_t mbox_status_count;
+	volatile uint32_t mbox_status[MBOX_REG_COUNT];
+
+	/* FW ddb index map */
+	struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
+
+	/* Saved srb for status continuation entry processing */
+	struct srb *status_srb;
+
+	uint8_t acb_version;
+
+	/* qla82xx specific fields */
+	struct device_reg_82xx  __iomem *qla4_82xx_reg; /* Base I/O address */
+	unsigned long nx_pcibase;	/* Base I/O address */
+	uint8_t *nx_db_rd_ptr;		/* Doorbell read pointer */
+	unsigned long nx_db_wr_ptr;	/* Doorbell write pointer */
+	unsigned long first_page_group_start;
+	unsigned long first_page_group_end;
+
+	uint32_t crb_win;
+	uint32_t curr_window;
+	uint32_t ddr_mn_window;
+	unsigned long mn_win_crb;
+	unsigned long ms_win_crb;
+	int qdr_sn_window;
+	rwlock_t hw_lock;
+	uint16_t func_num;
+	int link_width;
+
+	struct qla4_8xxx_legacy_intr_set nx_legacy_intr;
+	u32 nx_crb_mask;
+
+	uint8_t revision_id;
+	uint32_t fw_heartbeat_counter;
+
+	struct isp_operations *isp_ops;
+	struct ql82xx_hw_data hw;
+
+	uint32_t nx_dev_init_timeout;
+	uint32_t nx_reset_timeout;
+	void *fw_dump;
+	uint32_t fw_dump_size;
+	uint32_t fw_dump_capture_mask;
+	void *fw_dump_tmplt_hdr;
+	uint32_t fw_dump_tmplt_size;
+	uint32_t fw_dump_skip_size;
+
+	struct completion mbx_intr_comp;
+
+	struct ipaddress_config ip_config;
+	struct iscsi_iface *iface_ipv4;
+	struct iscsi_iface *iface_ipv6_0;
+	struct iscsi_iface *iface_ipv6_1;
+
+	/* --- From About Firmware --- */
+	struct about_fw_info fw_info;
+	uint32_t fw_uptime_secs;  /* seconds elapsed since fw bootup */
+	uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */
+	uint16_t def_timeout; /* Default login timeout */
+
+	uint32_t flash_state;
+#define	QLFLASH_WAITING		0
+#define	QLFLASH_READING		1
+#define	QLFLASH_WRITING		2
+	struct dma_pool *chap_dma_pool;
+	uint8_t *chap_list; /* CHAP table cache */
+	struct mutex  chap_sem;
+
+#define CHAP_DMA_BLOCK_SIZE    512
+	struct workqueue_struct *task_wq;
+	unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
+#define SYSFS_FLAG_FW_SEL_BOOT 2
+	struct iscsi_boot_kset *boot_kset;
+	struct ql4_boot_tgt_info boot_tgt;
+	uint16_t phy_port_num;
+	uint16_t phy_port_cnt;
+	uint16_t iscsi_pci_func_cnt;
+	uint8_t model_name[16];
+	struct completion disable_acb_comp;
+	struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+	uint16_t pri_ddb_idx;
+	uint16_t sec_ddb_idx;
+	int is_reset;
+	uint16_t temperature;
+
+	/* event work list */
+	struct list_head work_list;
+	spinlock_t work_lock;
+
+	/* mbox iocb */
+#define MAX_MRB		128
+	struct mrb *active_mrb_array[MAX_MRB];
+	uint32_t mrb_index;
+
+	uint32_t *reg_tbl;
+	struct qla4_83xx_reset_template reset_tmplt;
+	struct device_reg_83xx  __iomem *qla4_83xx_reg; /* Base I/O address
+							   for ISP8324 and
+							   ISP8042 */
+	uint32_t pf_bit;
+	struct qla4_83xx_idc_information idc_info;
+	struct addr_ctrl_blk *saved_acb;
+	int notify_idc_comp;
+	int notify_link_up_comp;
+	int idc_extend_tmo;
+	struct completion idc_comp;
+	struct completion link_up_comp;
+};
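+
+/*
+ * Rough locking summary, inferred from the fields above (illustrative,
+ * not exhaustive): hardware_lock guards register access and the
+ * request/response ring indices, mbox_sem serializes mailbox commands,
+ * chap_sem protects the chap_list cache, and work_lock protects
+ * work_list.
+ */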
+
+struct ql4_task_data {
+	struct scsi_qla_host *ha;
+	uint8_t iocb_req_cnt;
+	dma_addr_t data_dma;
+	void *req_buffer;
+	dma_addr_t req_dma;
+	uint32_t req_len;
+	void *resp_buffer;
+	dma_addr_t resp_dma;
+	uint32_t resp_len;
+	struct iscsi_task *task;
+	struct passthru_status sts;
+	struct work_struct task_work;
+};
+
+struct qla_endpoint {
+	struct Scsi_Host *host;
+	struct sockaddr_storage dst_addr;
+};
+
+struct qla_conn {
+	struct qla_endpoint *qla_ep;
+};
+
+static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
+{
+	return ((ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
+{
+	return ((ha->ip_config.ipv6_options &
+		IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+}
+
+static inline int is_qla4010(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
+}
+
+static inline int is_qla4022(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
+}
+
+static inline int is_qla4032(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
+}
+
+static inline int is_qla40XX(struct scsi_qla_host *ha)
+{
+	return is_qla4032(ha) || is_qla4022(ha) || is_qla4010(ha);
+}
+
+static inline int is_qla8022(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
+}
+
+static inline int is_qla8032(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
+}
+
+static inline int is_qla8042(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042;
+}
+
+static inline int is_qla80XX(struct scsi_qla_host *ha)
+{
+	return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha);
+}
+
+static inline int is_aer_supported(struct scsi_qla_host *ha)
+{
+	return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
+		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) ||
+		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
+}
+
+static inline int adapter_up(struct scsi_qla_host *ha)
+{
+	return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
+	       (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
+	       (!test_bit(AF_LOOPBACK, &ha->flags));
+}
+
+static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
+{
+	return (struct scsi_qla_host *)iscsi_host_priv(shost);
+}
+
+static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u1.isp4010.nvram :
+		&ha->reg->u1.isp4022.semaphore);
+}
+
+static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u1.isp4010.nvram :
+		&ha->reg->u1.isp4022.nvram);
+}
+
+static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.ext_hw_conf :
+		&ha->reg->u2.isp4022.p0.ext_hw_conf);
+}
+
+static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_status :
+		&ha->reg->u2.isp4022.p0.port_status);
+}
+
+static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_ctrl :
+		&ha->reg->u2.isp4022.p0.port_ctrl);
+}
+
+static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_err_status :
+		&ha->reg->u2.isp4022.p0.port_err_status);
+}
+
+static inline void __iomem* isp_gp_out(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.gp_out :
+		&ha->reg->u2.isp4022.p0.gp_out);
+}
+
+static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
+{
+	return (is_qla4010(ha) ?
+		offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
+		offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
+}
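+
+/*
+ * The division by 2 converts the byte offset from offsetof() into a
+ * 16-bit word offset, since NVRAM is accessed one word at a time.
+ */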
+
+int ql4xxx_sem_spinlock(struct scsi_qla_host *ha, u32 sem_mask, u32 sem_bits);
+void ql4xxx_sem_unlock(struct scsi_qla_host *ha, u32 sem_mask);
+int ql4xxx_sem_lock(struct scsi_qla_host *ha, u32 sem_mask, u32 sem_bits);
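+
+/*
+ * Of the two acquire variants above, ql4xxx_sem_spinlock() is
+ * understood to poll until the hardware semaphore is granted, while
+ * ql4xxx_sem_lock() makes a single attempt and returns the outcome;
+ * ql4xxx_sem_unlock() releases either.  The wrappers below select the
+ * 4010 or 4022 mask/bit encoding for each semaphore.
+ */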
+
+static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
+{
+	if (is_qla4010(a))
+		return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
+					   QL4010_FLASH_SEM_BITS);
+	else
+		return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
+					   (QL4022_RESOURCE_BITS_BASE_CODE |
+					    (a->mac_index)) << 13);
+}
+
+static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
+{
+	if (is_qla4010(a))
+		ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
+}
+
+static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
+{
+	if (is_qla4010(a))
+		return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
+					   QL4010_NVRAM_SEM_BITS);
+	else
+		return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
+					   (QL4022_RESOURCE_BITS_BASE_CODE |
+					    (a->mac_index)) << 10);
+}
+
+static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
+{
+	if (is_qla4010(a))
+		ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
+}
+
+static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
+{
+	if (is_qla4010(a))
+		return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
+				       QL4010_DRVR_SEM_BITS);
+	else
+		return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
+				       (QL4022_RESOURCE_BITS_BASE_CODE |
+					(a->mac_index)) << 1);
+}
+
+static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
+{
+	if (is_qla4010(a))
+		ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
+}
+
+static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
+{
+	return test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+	       test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	       test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+	       test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	       test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+	       test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+
+}
+
+static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha,
+				      const uint32_t crb_reg)
+{
+	return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]);
+}
+
+static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha,
+				       const uint32_t crb_reg,
+				       const uint32_t value)
+{
+	ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value);
+}
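+
+/*
+ * reg_tbl maps generic CRB register indices to chip-specific
+ * addresses, which is what lets these two helpers (and the common
+ * 8xxx code built on them) stay identical for 82xx and 83xx parts.
+ */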
+
+/*---------------------------------------------------------------------------*/
+
+/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER    0
+#define RESET_ADAPTER   1
+
+#define PRESERVE_DDB_LIST	0
+#define REBUILD_DDB_LIST	1
+
+/* Defines for process_aen() */
+#define PROCESS_ALL_AENS	 0
+#define FLUSH_DDB_CHANGED_AENS	 1
+
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP		0
+
+#endif	/*_QLA4XXX_H */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_fw.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_fw.h
new file mode 100644
index 0000000..699575e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_fw.h
@@ -0,0 +1,1443 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef _QLA4X_FW_H
+#define _QLA4X_FW_H
+
+
+#define MAX_PRST_DEV_DB_ENTRIES		64
+#define MIN_DISC_DEV_DB_ENTRY		MAX_PRST_DEV_DB_ENTRIES
+#define MAX_DEV_DB_ENTRIES		512
+#define MAX_DEV_DB_ENTRIES_40XX		256
+
+/*************************************************************************
+ *
+ *		ISP 4010 I/O Register Set Structure and Definitions
+ *
+ *************************************************************************/
+
+struct port_ctrl_stat_regs {
+	__le32 ext_hw_conf;	/* 0x50  R/W */
+	__le32 rsrvd0;		/* 0x54 */
+	__le32 port_ctrl;	/* 0x58 */
+	__le32 port_status;	/* 0x5c */
+	__le32 rsrvd1[32];	/* 0x60-0xdf */
+	__le32 gp_out;		/* 0xe0 */
+	__le32 gp_in;		/* 0xe4 */
+	__le32 rsrvd2[5];	/* 0xe8-0xfb */
+	__le32 port_err_status; /* 0xfc */
+};
+
+struct host_mem_cfg_regs {
+	__le32 rsrvd0[12];	/* 0x50-0x79 */
+	__le32 req_q_out;	/* 0x80 */
+	__le32 rsrvd1[31];	/* 0x84-0xFF */
+};
+
+/*
+ * ISP 82xx I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+	__le32 req_q_out;	/* 0x0000 (R): Request Queue out-Pointer. */
+	__le32 reserve1[63];	/* pad to 0x0100 (64 * 4 bytes) */
+	__le32 rsp_q_in;	/* 0x0100 (R/W): Response Queue In-Pointer. */
+	__le32 reserve2[63];	/* pad to 0x0200 */
+	__le32 rsp_q_out;	/* 0x0200 (R/W): Response Queue Out-Pointer. */
+	__le32 reserve3[63];	/* pad to 0x0300 */
+
+	__le32 mailbox_in[8];	/* 0x0300 (R/W): Mail box In registers */
+	__le32 reserve4[24];
+	__le32 hint;		/* 0x0380 (R/W): Host interrupt register */
+#define HINT_MBX_INT_PENDING	BIT_0
+	__le32 reserve5[31];
+	__le32 mailbox_out[8];	/* 0x0400 (R): Mail box Out registers */
+	__le32 reserve6[56];
+
+	__le32 host_status;	/* Offset 0x500 (R): host status */
+#define HSRX_RISC_MB_INT	BIT_0  /* RISC to Host Mailbox interrupt */
+#define HSRX_RISC_IOCB_INT	BIT_1  /* RISC to Host IOCB interrupt */
+
+	__le32 host_int;	/* Offset 0x0504 (R/W): Interrupt status. */
+#define ISRX_82XX_RISC_INT	BIT_0 /* RISC interrupt. */
+};
+
+/* ISP 83xx I/O Register Set structure */
+struct device_reg_83xx {
+	__le32 mailbox_in[16];	/* 0x0000 */
+	__le32 reserve1[496];	/* 0x0040 */
+	__le32 mailbox_out[16];	/* 0x0800 */
+	__le32 reserve2[496];
+	__le32 mbox_int;	/* 0x1000 */
+	__le32 reserve3[63];
+	__le32 req_q_out;	/* 0x1100 */
+	__le32 reserve4[63];
+
+	__le32 rsp_q_in;	/* 0x1200 */
+	__le32 reserve5[1919];
+
+	__le32 req_q_in;	/* 0x3000 */
+	__le32 reserve6[3];
+	__le32 iocb_int_mask;	/* 0x3010 */
+	__le32 reserve7[3];
+	__le32 rsp_q_out;	/* 0x3020 */
+	__le32 reserve8[3];
+	__le32 anonymousbuff;	/* 0x3030 */
+	__le32 mb_int_mask;	/* 0x3034 */
+
+	__le32 host_intr;	/* 0x3038 - Host Interrupt Register */
+	__le32 risc_intr;	/* 0x303C - RISC Interrupt Register */
+	__le32 reserve9[544];
+	__le32 leg_int_ptr;	/* 0x38C0 - Legacy Interrupt Pointer Register */
+	__le32 leg_int_trig;	/* 0x38C4 - Legacy Interrupt Trigger Control */
+	__le32 leg_int_mask;	/* 0x38C8 - Legacy Interrupt Mask Register */
+};
+
+#define INT_ENABLE_FW_MB	(1 << 2)
+#define INT_MASK_FW_MB		(1 << 2)
+
+/*  remote register set (access via PCI memory read/write) */
+struct isp_reg {
+#define MBOX_REG_COUNT 8
+	__le32 mailbox[MBOX_REG_COUNT];
+
+	__le32 flash_address;	/* 0x20 */
+	__le32 flash_data;
+	__le32 ctrl_status;
+
+	union {
+		struct {
+			__le32 nvram;
+			__le32 reserved1[2]; /* 0x30 */
+		} __attribute__ ((packed)) isp4010;
+		struct {
+			__le32 intr_mask;
+			__le32 nvram; /* 0x30 */
+			__le32 semaphore;
+		} __attribute__ ((packed)) isp4022;
+	} u1;
+
+	__le32 req_q_in;    /* SCSI Request Queue Producer Index */
+	__le32 rsp_q_out;   /* SCSI Completion Queue Consumer Index */
+
+	__le32 reserved2[4];	/* 0x40 */
+
+	union {
+		struct {
+			__le32 ext_hw_conf; /* 0x50 */
+			__le32 flow_ctrl;
+			__le32 port_ctrl;
+			__le32 port_status;
+
+			__le32 reserved3[8]; /* 0x60 */
+
+			__le32 req_q_out; /* 0x80 */
+
+			__le32 reserved4[23]; /* 0x84 */
+
+			__le32 gp_out; /* 0xe0 */
+			__le32 gp_in;
+
+			__le32 reserved5[5];
+
+			__le32 port_err_status; /* 0xfc */
+		} __attribute__ ((packed)) isp4010;
+		struct {
+			union {
+				struct port_ctrl_stat_regs p0;
+				struct host_mem_cfg_regs p1;
+			};
+		} __attribute__ ((packed)) isp4022;
+	} u2;
+};				/* 256 x100 */
+
+
+/* Semaphore Defines for 4010 */
+#define QL4010_DRVR_SEM_BITS	0x00000030
+#define QL4010_GPIO_SEM_BITS	0x000000c0
+#define QL4010_SDRAM_SEM_BITS	0x00000300
+#define QL4010_PHY_SEM_BITS	0x00000c00
+#define QL4010_NVRAM_SEM_BITS	0x00003000
+#define QL4010_FLASH_SEM_BITS	0x0000c000
+
+#define QL4010_DRVR_SEM_MASK	0x00300000
+#define QL4010_GPIO_SEM_MASK	0x00c00000
+#define QL4010_SDRAM_SEM_MASK	0x03000000
+#define QL4010_PHY_SEM_MASK	0x0c000000
+#define QL4010_NVRAM_SEM_MASK	0x30000000
+#define QL4010_FLASH_SEM_MASK	0xc0000000
+
+/* Semaphore Defines for 4022 */
+#define QL4022_RESOURCE_MASK_BASE_CODE 0x7
+#define QL4022_RESOURCE_BITS_BASE_CODE 0x4
+
+
+#define QL4022_DRVR_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (1+16))
+#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16))
+#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16))
+#define QL4022_NVRAM_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
+#define QL4022_FLASH_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
+
+/* nvram address for 4032 */
+#define NVRAM_PORT0_BOOT_MODE		0x03b1
+#define NVRAM_PORT0_BOOT_PRI_TGT	0x03b2
+#define NVRAM_PORT0_BOOT_SEC_TGT	0x03bb
+#define NVRAM_PORT1_BOOT_MODE		0x07b1
+#define NVRAM_PORT1_BOOT_PRI_TGT	0x07b2
+#define NVRAM_PORT1_BOOT_SEC_TGT	0x07bb
+
+
+/* Page # defines for 4022 */
+#define PORT_CTRL_STAT_PAGE			0	/* 4022 */
+#define HOST_MEM_CFG_PAGE			1	/* 4022 */
+#define LOCAL_RAM_CFG_PAGE			2	/* 4022 */
+#define PROT_STAT_PAGE				3	/* 4022 */
+
+/* Register Mask - sets corresponding mask bits in the upper word */
+static inline uint32_t set_rmask(uint32_t val)
+{
+	return (val & 0xffff) | (val << 16);
+}
+
+
+static inline uint32_t clr_rmask(uint32_t val)
+{
+	return 0 | (val << 16);
+}
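+
+/*
+ * These helpers rely on the masked-write convention of the ISP
+ * control registers: the upper 16 bits of a write select which of the
+ * lower 16 bits take effect, so individual bits can be set or cleared
+ * without a read-modify-write cycle, e.g. (illustrative)
+ *
+ *	writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+ */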
+
+/*  ctrl_status definitions */
+#define CSR_SCSI_PAGE_SELECT			0x00000003
+#define CSR_SCSI_INTR_ENABLE			0x00000004	/* 4010 */
+#define CSR_SCSI_RESET_INTR			0x00000008
+#define CSR_SCSI_COMPLETION_INTR		0x00000010
+#define CSR_SCSI_PROCESSOR_INTR			0x00000020
+#define CSR_INTR_RISC				0x00000040
+#define CSR_BOOT_ENABLE				0x00000080
+#define CSR_NET_PAGE_SELECT			0x00000300	/* 4010 */
+#define CSR_FUNC_NUM				0x00000700	/* 4022 */
+#define CSR_NET_RESET_INTR			0x00000800	/* 4010 */
+#define CSR_FORCE_SOFT_RESET			0x00002000	/* 4022 */
+#define CSR_FATAL_ERROR				0x00004000
+#define CSR_SOFT_RESET				0x00008000
+#define ISP_CONTROL_FN_MASK			CSR_FUNC_NUM
+#define ISP_CONTROL_FN0_SCSI			0x0500
+#define ISP_CONTROL_FN1_SCSI			0x0700
+
+#define INTR_PENDING				(CSR_SCSI_COMPLETION_INTR |\
+						 CSR_SCSI_PROCESSOR_INTR |\
+						 CSR_SCSI_RESET_INTR)
+
+/* ISP InterruptMask definitions */
+#define IMR_SCSI_INTR_ENABLE			0x00000004	/* 4022 */
+
+/* ISP 4022 nvram definitions */
+#define NVR_WRITE_ENABLE			0x00000010	/* 4022 */
+
+#define QL4010_NVRAM_SIZE			0x200
+#define QL40X2_NVRAM_SIZE			0x800
+
+/*  ISP port_status definitions */
+
+/*  ISP Semaphore definitions */
+
+/*  ISP General Purpose Output definitions */
+#define GPOR_TOPCAT_RESET			0x00000004
+
+/*  shadow registers (DMA'd from HA to system memory; read-only) */
+struct shadow_regs {
+	/* SCSI Request Queue Consumer Index */
+	__le32 req_q_out;	/*  0 x0   R */
+
+	/* SCSI Completion Queue Producer Index */
+	__le32 rsp_q_in;	/*  4 x4   R */
+};		  /*  8 x8 */
+
+
+/*  External hardware configuration register */
+union external_hw_config_reg {
+	struct {
+		/* FIXME: Do we even need this?	 All values are
+		 * referred to by 16-bit quantities.  Platform and
+		 * endianness issues. */
+		__le32 bReserved0:1;
+		__le32 bSDRAMProtectionMethod:2;
+		__le32 bSDRAMBanks:1;
+		__le32 bSDRAMChipWidth:1;
+		__le32 bSDRAMChipSize:2;
+		__le32 bParityDisable:1;
+		__le32 bExternalMemoryType:1;
+		__le32 bFlashBIOSWriteEnable:1;
+		__le32 bFlashUpperBankSelect:1;
+		__le32 bWriteBurst:2;
+		__le32 bReserved1:3;
+		__le32 bMask:16;
+	};
+	uint32_t Asuint32_t;
+};
+
+/* 82XX Support  start */
+/* 82xx Default FLT Addresses */
+#define FA_FLASH_LAYOUT_ADDR_82		0xFC400
+#define FA_FLASH_DESCR_ADDR_82		0xFC000
+#define FA_BOOT_LOAD_ADDR_82		0x04000
+#define FA_BOOT_CODE_ADDR_82		0x20000
+#define FA_RISC_CODE_ADDR_82		0x40000
+#define FA_GOLD_RISC_CODE_ADDR_82	0x80000
+#define FA_FLASH_ISCSI_CHAP		0x540000
+#define FA_FLASH_CHAP_SIZE		0xC0000
+#define FA_FLASH_ISCSI_DDB		0x420000
+#define FA_FLASH_DDB_SIZE		0x080000
+
+/* Flash Description Table */
+struct qla_fdt_layout {
+	uint8_t sig[4];
+	uint16_t version;
+	uint16_t len;
+	uint16_t checksum;
+	uint8_t unused1[2];
+	uint8_t model[16];
+	uint16_t man_id;
+	uint16_t id;
+	uint8_t flags;
+	uint8_t erase_cmd;
+	uint8_t alt_erase_cmd;
+	uint8_t wrt_enable_cmd;
+	uint8_t wrt_enable_bits;
+	uint8_t wrt_sts_reg_cmd;
+	uint8_t unprotect_sec_cmd;
+	uint8_t read_man_id_cmd;
+	uint32_t block_size;
+	uint32_t alt_block_size;
+	uint32_t flash_size;
+	uint32_t wrt_enable_data;
+	uint8_t read_id_addr_len;
+	uint8_t wrt_disable_bits;
+	uint8_t read_dev_id_len;
+	uint8_t chip_erase_cmd;
+	uint16_t read_timeout;
+	uint8_t protect_sec_cmd;
+	uint8_t unused2[65];
+};
+
+/* Flash Layout Table */
+
+struct qla_flt_location {
+	uint8_t sig[4];
+	uint16_t start_lo;
+	uint16_t start_hi;
+	uint8_t version;
+	uint8_t unused[5];
+	uint16_t checksum;
+};
+
+struct qla_flt_header {
+	uint16_t version;
+	uint16_t length;
+	uint16_t checksum;
+	uint16_t unused;
+};
+
+/* 82xx FLT Regions */
+#define FLT_REG_FDT		0x1a
+#define FLT_REG_FLT		0x1c
+#define FLT_REG_BOOTLOAD_82	0x72
+#define FLT_REG_FW_82		0x74
+#define FLT_REG_FW_82_1		0x97
+#define FLT_REG_GOLD_FW_82	0x75
+#define FLT_REG_BOOT_CODE_82	0x78
+#define FLT_REG_ISCSI_PARAM	0x65
+#define FLT_REG_ISCSI_CHAP	0x63
+#define FLT_REG_ISCSI_DDB	0x6A
+
+struct qla_flt_region {
+	uint32_t code;
+	uint32_t size;
+	uint32_t start;
+	uint32_t end;
+};
+
+/*************************************************************************
+ *
+ *		Mailbox Commands Structures and Definitions
+ *
+ *************************************************************************/
+
+/*  Mailbox command definitions */
+#define MBOX_CMD_ABOUT_FW			0x0009
+#define MBOX_CMD_PING				0x000B
+#define PING_IPV6_PROTOCOL_ENABLE		0x1
+#define PING_IPV6_LINKLOCAL_ADDR		0x4
+#define PING_IPV6_ADDR0				0x8
+#define PING_IPV6_ADDR1				0xC
+#define MBOX_CMD_ENABLE_INTRS			0x0010
+#define INTR_DISABLE				0
+#define INTR_ENABLE				1
+#define MBOX_CMD_STOP_FW			0x0014
+#define MBOX_CMD_ABORT_TASK			0x0015
+#define MBOX_CMD_LUN_RESET			0x0016
+#define MBOX_CMD_TARGET_WARM_RESET		0x0017
+#define MBOX_CMD_GET_MANAGEMENT_DATA		0x001E
+#define MBOX_CMD_GET_FW_STATUS			0x001F
+#define MBOX_CMD_SET_ISNS_SERVICE		0x0021
+#define ISNS_DISABLE				0
+#define ISNS_ENABLE				1
+#define MBOX_CMD_COPY_FLASH			0x0024
+#define MBOX_CMD_WRITE_FLASH			0x0025
+#define MBOX_CMD_READ_FLASH			0x0026
+#define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031
+#define MBOX_CMD_CONN_OPEN			0x0074
+#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056
+#define DDB_NOT_LOGGED_IN			0x09
+#define LOGOUT_OPTION_CLOSE_SESSION		0x0002
+#define LOGOUT_OPTION_RELOGIN			0x0004
+#define LOGOUT_OPTION_FREE_DDB			0x0008
+#define MBOX_CMD_SET_PARAM			0x0059
+#define SET_DRVR_VERSION			0x200
+#define MAX_DRVR_VER_LEN			24
+#define MBOX_CMD_EXECUTE_IOCB_A64		0x005A
+#define MBOX_CMD_INITIALIZE_FIRMWARE		0x0060
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK		0x0061
+#define MBOX_CMD_REQUEST_DATABASE_ENTRY		0x0062
+#define MBOX_CMD_SET_DATABASE_ENTRY		0x0063
+#define MBOX_CMD_GET_DATABASE_ENTRY		0x0064
+#define DDB_DS_UNASSIGNED			0x00
+#define DDB_DS_NO_CONNECTION_ACTIVE		0x01
+#define DDB_DS_DISCOVERY			0x02
+#define DDB_DS_SESSION_ACTIVE			0x04
+#define DDB_DS_SESSION_FAILED			0x06
+#define DDB_DS_LOGIN_IN_PROCESS			0x07
+#define MBOX_CMD_GET_FW_STATE			0x0069
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
+#define MBOX_CMD_DIAG_TEST			0x0075
+#define MBOX_CMD_GET_SYS_INFO			0x0078
+#define MBOX_CMD_GET_NVRAM			0x0078	/* For 40xx */
+#define MBOX_CMD_SET_NVRAM			0x0079	/* For 40xx */
+#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS	0x0087
+#define MBOX_CMD_SET_ACB			0x0088
+#define MBOX_CMD_GET_ACB			0x0089
+#define MBOX_CMD_DISABLE_ACB			0x008A
+#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE	0x008B
+#define MBOX_CMD_GET_IPV6_DEST_CACHE		0x008C
+#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST	0x008D
+#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST	0x008E
+#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE	0x0090
+#define MBOX_CMD_GET_IP_ADDR_STATE		0x0091
+#define MBOX_CMD_SEND_IPV6_ROUTER_SOL		0x0092
+#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR	0x0093
+#define MBOX_CMD_SET_PORT_CONFIG		0x0122
+#define MBOX_CMD_GET_PORT_CONFIG		0x0123
+#define MBOX_CMD_SET_LED_CONFIG			0x0125
+#define MBOX_CMD_GET_LED_CONFIG			0x0126
+#define MBOX_CMD_MINIDUMP			0x0129
+
+/* Port Config */
+#define ENABLE_INTERNAL_LOOPBACK		0x04
+#define ENABLE_EXTERNAL_LOOPBACK		0x08
+#define ENABLE_DCBX				0x10
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND		0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND		0x01
+
+/* Mailbox 1 */
+#define FW_STATE_READY				0x0000
+#define FW_STATE_CONFIG_WAIT			0x0001
+#define FW_STATE_WAIT_AUTOCONNECT		0x0002
+#define FW_STATE_ERROR				0x0004
+#define FW_STATE_CONFIGURING_IP			0x0008
+
+/* Mailbox 3 */
+#define FW_ADDSTATE_OPTICAL_MEDIA		0x0001
+#define FW_ADDSTATE_DHCPv4_ENABLED		0x0002
+#define FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED	0x0004
+#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED	0x0008
+#define FW_ADDSTATE_LINK_UP			0x0010
+#define FW_ADDSTATE_ISNS_SVC_ENABLED		0x0020
+#define FW_ADDSTATE_LINK_SPEED_10MBPS		0x0100
+#define FW_ADDSTATE_LINK_SPEED_100MBPS		0x0200
+#define FW_ADDSTATE_LINK_SPEED_1GBPS		0x0400
+#define FW_ADDSTATE_LINK_SPEED_10GBPS		0x0800
+
+#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS	0x006B
+#define IPV6_DEFAULT_DDB_ENTRY			0x0001
+
+#define MBOX_CMD_CONN_OPEN_SESS_LOGIN		0x0074
+#define MBOX_CMD_GET_CRASH_RECORD		0x0076	/* 4010 only */
+#define MBOX_CMD_GET_CONN_EVENT_LOG		0x0077
+
+#define MBOX_CMD_IDC_ACK			0x0101
+#define MBOX_CMD_IDC_TIME_EXTEND		0x0102
+#define MBOX_CMD_PORT_RESET			0x0120
+
+/*  Mailbox status definitions */
+#define MBOX_COMPLETION_STATUS			4
+#define MBOX_STS_BUSY				0x0007
+#define MBOX_STS_INTERMEDIATE_COMPLETION	0x1000
+#define MBOX_STS_COMMAND_COMPLETE		0x4000
+#define MBOX_STS_COMMAND_ERROR			0x4005
+
+#define MBOX_ASYNC_EVENT_STATUS			8
+#define MBOX_ASTS_SYSTEM_ERROR			0x8002
+#define MBOX_ASTS_REQUEST_TRANSFER_ERROR	0x8003
+#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR	0x8004
+#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM	0x8005
+#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED	0x8006
+#define MBOX_ASTS_LINK_UP			0x8010
+#define MBOX_ASTS_LINK_DOWN			0x8011
+#define MBOX_ASTS_DATABASE_CHANGED		0x8014
+#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED	0x8015
+#define MBOX_ASTS_SELF_TEST_FAILED		0x8016
+#define MBOX_ASTS_LOGIN_FAILED			0x8017
+#define MBOX_ASTS_DNS				0x8018
+#define MBOX_ASTS_HEARTBEAT			0x8019
+#define MBOX_ASTS_NVRAM_INVALID			0x801A
+#define MBOX_ASTS_MAC_ADDRESS_CHANGED		0x801B
+#define MBOX_ASTS_IP_ADDRESS_CHANGED		0x801C
+#define MBOX_ASTS_DHCP_LEASE_EXPIRED		0x801D
+#define MBOX_ASTS_DHCP_LEASE_ACQUIRED		0x801F
+#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
+#define MBOX_ASTS_DUPLICATE_IP			0x8025
+#define MBOX_ASTS_ARP_COMPLETE			0x8026
+#define MBOX_ASTS_SUBNET_STATE_CHANGE		0x8027
+#define MBOX_ASTS_RESPONSE_QUEUE_FULL		0x8028
+#define MBOX_ASTS_IP_ADDR_STATE_CHANGED		0x8029
+#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED	0x802A
+#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE		0x802B
+#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED	0x802C
+#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED	0x802D
+#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD		0x802E
+#define MBOX_ASTS_INITIALIZATION_FAILED		0x8031
+#define MBOX_ASTS_SYSTEM_WARNING_EVENT		0x8036
+#define MBOX_ASTS_IDC_COMPLETE			0x8100
+#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION	0x8101
+#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION	0x8102
+#define MBOX_ASTS_DCBX_CONF_CHANGE		0x8110
+#define MBOX_ASTS_TXSCVR_INSERTED		0x8130
+#define MBOX_ASTS_TXSCVR_REMOVED		0x8131
+
+#define ISNS_EVENT_DATA_RECEIVED		0x0000
+#define ISNS_EVENT_CONNECTION_OPENED		0x0001
+#define ISNS_EVENT_CONNECTION_FAILED		0x0002
+#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR	0x8022
+
+/* ACB Configuration Defines */
+#define ACB_CONFIG_DISABLE		0x00
+#define ACB_CONFIG_SET			0x01
+
+/* ACB/IP Address State Defines */
+#define IP_ADDRSTATE_UNCONFIGURED	0
+#define IP_ADDRSTATE_INVALID		1
+#define IP_ADDRSTATE_ACQUIRING		2
+#define IP_ADDRSTATE_TENTATIVE		3
+#define IP_ADDRSTATE_DEPRICATED		4
+#define IP_ADDRSTATE_PREFERRED		5
+#define IP_ADDRSTATE_DISABLING		6
+
+/* FLASH offsets */
+#define FLASH_SEGMENT_IFCB	0x04000000
+
+#define FLASH_OPT_RMW_HOLD	0
+#define FLASH_OPT_RMW_INIT	1
+#define FLASH_OPT_COMMIT	2
+#define FLASH_OPT_RMW_COMMIT	3
+
+/* generic defines to enable/disable params */
+#define QL4_PARAM_DISABLE	0
+#define QL4_PARAM_ENABLE	1
+
+/*************************************************************************/
+
+/* Host Adapter Initialization Control Block (from host) */
+struct addr_ctrl_blk {
+	uint8_t version;	/* 00 */
+#define  IFCB_VER_MIN			0x01
+#define  IFCB_VER_MAX			0x02
+	uint8_t control;	/* 01 */
+#define	 CTRLOPT_NEW_CONN_DISABLE	0x0002
+
+	uint16_t fw_options;	/* 02-03 */
+#define	 FWOPT_HEARTBEAT_ENABLE		  0x1000
+#define	 FWOPT_SESSION_MODE		  0x0040
+#define	 FWOPT_INITIATOR_MODE		  0x0020
+#define	 FWOPT_TARGET_MODE		  0x0010
+#define	 FWOPT_ENABLE_CRBDB		  0x8000
+
+	uint16_t exec_throttle;	/* 04-05 */
+	uint8_t zio_count;	/* 06 */
+	uint8_t res0;	/* 07 */
+	uint16_t eth_mtu_size;	/* 08-09 */
+	uint16_t add_fw_options;	/* 0A-0B */
+#define ADFWOPT_SERIALIZE_TASK_MGMT	0x0400
+#define ADFWOPT_AUTOCONN_DISABLE	0x0002
+
+	uint8_t hb_interval;	/* 0C */
+	uint8_t inst_num; /* 0D */
+	uint16_t res1;		/* 0E-0F */
+	uint16_t rqq_consumer_idx;	/* 10-11 */
+	uint16_t compq_producer_idx;	/* 12-13 */
+	uint16_t rqq_len;	/* 14-15 */
+	uint16_t compq_len;	/* 16-17 */
+	uint32_t rqq_addr_lo;	/* 18-1B */
+	uint32_t rqq_addr_hi;	/* 1C-1F */
+	uint32_t compq_addr_lo;	/* 20-23 */
+	uint32_t compq_addr_hi;	/* 24-27 */
+	uint32_t shdwreg_addr_lo;	/* 28-2B */
+	uint32_t shdwreg_addr_hi;	/* 2C-2F */
+
+	uint16_t iscsi_opts;	/* 30-31 */
+#define ISCSIOPTS_HEADER_DIGEST_EN		0x2000
+#define ISCSIOPTS_DATA_DIGEST_EN		0x1000
+#define ISCSIOPTS_IMMEDIATE_DATA_EN		0x0800
+#define ISCSIOPTS_INITIAL_R2T_EN		0x0400
+#define ISCSIOPTS_DATA_SEQ_INORDER_EN		0x0200
+#define ISCSIOPTS_DATA_PDU_INORDER_EN		0x0100
+#define ISCSIOPTS_CHAP_AUTH_EN			0x0080
+#define ISCSIOPTS_SNACK_EN			0x0040
+#define ISCSIOPTS_DISCOVERY_LOGOUT_EN		0x0020
+#define ISCSIOPTS_BIDI_CHAP_EN			0x0010
+#define ISCSIOPTS_DISCOVERY_AUTH_EN		0x0008
+#define ISCSIOPTS_STRICT_LOGIN_COMP_EN		0x0004
+#define ISCSIOPTS_ERL				0x0003
+	uint16_t ipv4_tcp_opts;	/* 32-33 */
+#define TCPOPT_DELAYED_ACK_DISABLE	0x8000
+#define TCPOPT_DHCP_ENABLE		0x0200
+#define TCPOPT_DNS_SERVER_IP_EN		0x0100
+#define TCPOPT_SLP_DA_INFO_EN		0x0080
+#define TCPOPT_NAGLE_ALGO_DISABLE	0x0020
+#define TCPOPT_WINDOW_SCALE_DISABLE	0x0010
+#define TCPOPT_TIMER_SCALE		0x000E
+#define TCPOPT_TIMESTAMP_ENABLE		0x0001
+	uint16_t ipv4_ip_opts;	/* 34-35 */
+#define IPOPT_IPV4_PROTOCOL_ENABLE	0x8000
+#define IPOPT_IPV4_TOS_EN		0x4000
+#define IPOPT_VLAN_TAGGING_ENABLE	0x2000
+#define IPOPT_GRAT_ARP_EN		0x1000
+#define IPOPT_ALT_CID_EN		0x0800
+#define IPOPT_REQ_VID_EN		0x0400
+#define IPOPT_USE_VID_EN		0x0200
+#define IPOPT_LEARN_IQN_EN		0x0100
+#define IPOPT_FRAGMENTATION_DISABLE	0x0010
+#define IPOPT_IN_FORWARD_EN		0x0008
+#define IPOPT_ARP_REDIRECT_EN		0x0004
+
+	uint16_t iscsi_max_pdu_size;	/* 36-37 */
+	uint8_t ipv4_tos;	/* 38 */
+	uint8_t ipv4_ttl;	/* 39 */
+	uint8_t acb_version;	/* 3A */
+#define ACB_NOT_SUPPORTED		0x00
+#define ACB_SUPPORTED			0x02 /* Capable of ACB Version 2
+						Features */
+
+	uint8_t res2;	/* 3B */
+	uint16_t def_timeout;	/* 3C-3D */
+	uint16_t iscsi_fburst_len;	/* 3E-3F */
+	uint16_t iscsi_def_time2wait;	/* 40-41 */
+	uint16_t iscsi_def_time2retain;	/* 42-43 */
+	uint16_t iscsi_max_outstnd_r2t;	/* 44-45 */
+	uint16_t conn_ka_timeout;	/* 46-47 */
+	uint16_t ipv4_port;	/* 48-49 */
+	uint16_t iscsi_max_burst_len;	/* 4A-4B */
+	uint32_t res5;		/* 4C-4F */
+	uint8_t ipv4_addr[4];	/* 50-53 */
+	uint16_t ipv4_vlan_tag;	/* 54-55 */
+	uint8_t ipv4_addr_state;	/* 56 */
+	uint8_t ipv4_cacheid;	/* 57 */
+	uint8_t res6[8];	/* 58-5F */
+	uint8_t ipv4_subnet[4];	/* 60-63 */
+	uint8_t res7[12];	/* 64-6F */
+	uint8_t ipv4_gw_addr[4];	/* 70-73 */
+	uint8_t res8[0xc];	/* 74-7F */
+	uint8_t pri_dns_srvr_ip[4];/* 80-83 */
+	uint8_t sec_dns_srvr_ip[4];/* 84-87 */
+	uint16_t min_eph_port;	/* 88-89 */
+	uint16_t max_eph_port;	/* 8A-8B */
+	uint8_t res9[4];	/* 8C-8F */
+	uint8_t iscsi_alias[32];/* 90-AF */
+	uint8_t res9_1[0x16];	/* B0-C5 */
+	uint16_t tgt_portal_grp;/* C6-C7 */
+	uint8_t abort_timer;	/* C8	 */
+	uint8_t ipv4_tcp_wsf;	/* C9	 */
+	uint8_t res10[6];	/* CA-CF */
+	uint8_t ipv4_sec_ip_addr[4];	/* D0-D3 */
+	uint8_t ipv4_dhcp_vid_len;	/* D4 */
+	uint8_t ipv4_dhcp_vid[11];	/* D5-DF */
+	uint8_t res11[20];	/* E0-F3 */
+	uint8_t ipv4_dhcp_alt_cid_len;	/* F4 */
+	uint8_t ipv4_dhcp_alt_cid[11];	/* F5-FF */
+	uint8_t iscsi_name[224];	/* 100-1DF */
+	uint8_t res12[32];	/* 1E0-1FF */
+	uint32_t cookie;	/* 200-203 */
+	uint16_t ipv6_port;	/* 204-205 */
+	uint16_t ipv6_opts;	/* 206-207 */
+#define IPV6_OPT_IPV6_PROTOCOL_ENABLE		0x8000
+#define IPV6_OPT_VLAN_TAGGING_ENABLE		0x2000
+#define IPV6_OPT_GRAT_NEIGHBOR_ADV_EN		0x1000
+#define IPV6_OPT_REDIRECT_EN			0x0004
+
+	uint16_t ipv6_addtl_opts;	/* 208-209 */
+#define IPV6_ADDOPT_IGNORE_ICMP_ECHO_REQ		0x0040
+#define IPV6_ADDOPT_MLD_EN				0x0004
+#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE	0x0002 /* Pri ACB
+								  Only */
+#define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR		0x0001
+
+	uint16_t ipv6_tcp_opts;	/* 20A-20B */
+#define IPV6_TCPOPT_DELAYED_ACK_DISABLE		0x8000
+#define IPV6_TCPOPT_NAGLE_ALGO_DISABLE		0x0020
+#define IPV6_TCPOPT_WINDOW_SCALE_DISABLE	0x0010
+#define IPV6_TCPOPT_TIMER_SCALE			0x000E
+#define IPV6_TCPOPT_TIMESTAMP_EN		0x0001
+	uint8_t ipv6_tcp_wsf;	/* 20C */
+	uint16_t ipv6_flow_lbl;	/* 20D-20F */
+	uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
+	uint16_t ipv6_vlan_tag;	/* 220-221 */
+	uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
+	uint8_t ipv6_addr0_state;	/* 223 */
+	uint8_t ipv6_addr1_state;	/* 224 */
+	uint8_t ipv6_dflt_rtr_state;    /* 225 */
+#define IPV6_RTRSTATE_UNKNOWN                   0
+#define IPV6_RTRSTATE_MANUAL                    1
+#define IPV6_RTRSTATE_ADVERTISED                3
+#define IPV6_RTRSTATE_STALE                     4
+
+	uint8_t ipv6_traffic_class;	/* 226 */
+	uint8_t ipv6_hop_limit;	/* 227 */
+	uint8_t ipv6_if_id[8];	/* 228-22F */
+	uint8_t ipv6_addr0[16];	/* 230-23F */
+	uint8_t ipv6_addr1[16];	/* 240-24F */
+	uint32_t ipv6_nd_reach_time;	/* 250-253 */
+	uint32_t ipv6_nd_rexmit_timer;	/* 254-257 */
+	uint32_t ipv6_nd_stale_timeout;	/* 258-25B */
+	uint8_t ipv6_dup_addr_detect_count;	/* 25C */
+	uint8_t ipv6_cache_id;	/* 25D */
+	uint8_t res13[18];	/* 25E-26F */
+	uint32_t ipv6_gw_advrt_mtu;	/* 270-273 */
+	uint8_t res14[140];	/* 274-2FF */
+};
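+
+/*
+ * The byte offsets in the comments above index into this 0x300-byte
+ * block as the firmware lays it out.  The ACB is pushed to and pulled
+ * from the firmware with the MBOX_CMD_SET_ACB / MBOX_CMD_GET_ACB
+ * commands defined earlier; MBOX_CMD_DISABLE_ACB disables it.
+ */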
+
+#define IP_ADDR_COUNT	4 /* Four IP addresses supported per interface:
+			   * one IPv4, one IPv6 link-local, and two IPv6
+			   */
+
+#define IP_STATE_MASK	0x0F000000
+#define IP_STATE_SHIFT	24
+
+struct init_fw_ctrl_blk {
+	struct addr_ctrl_blk pri;
+/*	struct addr_ctrl_blk sec;*/
+};
+
+#define PRIMARI_ACB		0
+#define SECONDARY_ACB		1
+
+struct addr_ctrl_blk_def {
+	uint8_t reserved1[1];	/* 00 */
+	uint8_t control;	/* 01 */
+	uint8_t reserved2[11];	/* 02-0C */
+	uint8_t inst_num;	/* 0D */
+	uint8_t reserved3[34];	/* 0E-2F */
+	uint16_t iscsi_opts;	/* 30-31 */
+	uint16_t ipv4_tcp_opts;	/* 32-33 */
+	uint16_t ipv4_ip_opts;	/* 34-35 */
+	uint16_t iscsi_max_pdu_size;	/* 36-37 */
+	uint8_t ipv4_tos;	/* 38 */
+	uint8_t ipv4_ttl;	/* 39 */
+	uint8_t reserved4[2];	/* 3A-3B */
+	uint16_t def_timeout;	/* 3C-3D */
+	uint16_t iscsi_fburst_len;	/* 3E-3F */
+	uint8_t reserved5[4];	/* 40-43 */
+	uint16_t iscsi_max_outstnd_r2t;	/* 44-45 */
+	uint8_t reserved6[2];	/* 46-47 */
+	uint16_t ipv4_port;	/* 48-49 */
+	uint16_t iscsi_max_burst_len;	/* 4A-4B */
+	uint8_t reserved7[4];	/* 4C-4F */
+	uint8_t ipv4_addr[4];	/* 50-53 */
+	uint16_t ipv4_vlan_tag;	/* 54-55 */
+	uint8_t ipv4_addr_state;	/* 56 */
+	uint8_t ipv4_cacheid;	/* 57 */
+	uint8_t reserved8[8];	/* 58-5F */
+	uint8_t ipv4_subnet[4];	/* 60-63 */
+	uint8_t reserved9[12];	/* 64-6F */
+	uint8_t ipv4_gw_addr[4];	/* 70-73 */
+	uint8_t reserved10[84];	/* 74-C7 */
+	uint8_t abort_timer;	/* C8    */
+	uint8_t ipv4_tcp_wsf;	/* C9    */
+	uint8_t reserved11[10];	/* CA-D3 */
+	uint8_t ipv4_dhcp_vid_len;	/* D4 */
+	uint8_t ipv4_dhcp_vid[11];	/* D5-DF */
+	uint8_t reserved12[20];	/* E0-F3 */
+	uint8_t ipv4_dhcp_alt_cid_len;	/* F4 */
+	uint8_t ipv4_dhcp_alt_cid[11];	/* F5-FF */
+	uint8_t iscsi_name[224];	/* 100-1DF */
+	uint8_t reserved13[32];	/* 1E0-1FF */
+	uint32_t cookie;	/* 200-203 */
+	uint16_t ipv6_port;	/* 204-205 */
+	uint16_t ipv6_opts;	/* 206-207 */
+	uint16_t ipv6_addtl_opts;	/* 208-209 */
+	uint16_t ipv6_tcp_opts;		/* 20A-20B */
+	uint8_t ipv6_tcp_wsf;		/* 20C */
+	uint16_t ipv6_flow_lbl;		/* 20D-20F */
+	uint8_t ipv6_dflt_rtr_addr[16];	/* 210-21F */
+	uint16_t ipv6_vlan_tag;		/* 220-221 */
+	uint8_t ipv6_lnk_lcl_addr_state;	/* 222 */
+	uint8_t ipv6_addr0_state;	/* 223 */
+	uint8_t ipv6_addr1_state;	/* 224 */
+	uint8_t ipv6_dflt_rtr_state;	/* 225 */
+	uint8_t ipv6_traffic_class;	/* 226 */
+	uint8_t ipv6_hop_limit;		/* 227 */
+	uint8_t ipv6_if_id[8];		/* 228-22F */
+	uint8_t ipv6_addr0[16];		/* 230-23F */
+	uint8_t ipv6_addr1[16];		/* 240-24F */
+	uint32_t ipv6_nd_reach_time;	/* 250-253 */
+	uint32_t ipv6_nd_rexmit_timer;	/* 254-257 */
+	uint32_t ipv6_nd_stale_timeout;	/* 258-25B */
+	uint8_t ipv6_dup_addr_detect_count;	/* 25C */
+	uint8_t ipv6_cache_id;		/* 25D */
+	uint8_t reserved14[18];		/* 25E-26F */
+	uint32_t ipv6_gw_advrt_mtu;	/* 270-273 */
+	uint8_t reserved15[140];	/* 274-2FF */
+};
+
+/*************************************************************************/
+
+#define MAX_CHAP_ENTRIES_40XX	128
+#define MAX_CHAP_ENTRIES_82XX	1024
+#define MAX_RESRV_CHAP_IDX	3
+#define FLASH_CHAP_OFFSET	0x06000000
+
+struct ql4_chap_table {
+	uint16_t link;
+	uint8_t flags;
+	uint8_t secret_len;
+#define MIN_CHAP_SECRET_LEN	12
+#define MAX_CHAP_SECRET_LEN	100
+	uint8_t secret[MAX_CHAP_SECRET_LEN];
+#define MAX_CHAP_NAME_LEN	256
+	uint8_t name[MAX_CHAP_NAME_LEN];
+	uint16_t reserved;
+#define CHAP_VALID_COOKIE	0x4092
+#define CHAP_INVALID_COOKIE	0xFFEE
+	uint16_t cookie;
+};
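+
+/*
+ * CHAP entries are persisted in flash (see FLASH_CHAP_OFFSET) and
+ * cached in the host's chap_list.  An entry is presumably considered
+ * in use only while its cookie field reads CHAP_VALID_COOKIE;
+ * CHAP_INVALID_COOKIE marks a free slot.
+ */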
+
+struct dev_db_entry {
+	uint16_t options;	/* 00-01 */
+#define DDB_OPT_DISC_SESSION  0x10
+#define DDB_OPT_TARGET	      0x02 /* device is a target */
+#define DDB_OPT_IPV6_DEVICE	0x100
+#define DDB_OPT_AUTO_SENDTGTS_DISABLE		0x40
+#define DDB_OPT_IPV6_NULL_LINK_LOCAL		0x800 /* post connection */
+#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL	0x800 /* pre connection */
+
+#define OPT_IS_FW_ASSIGNED_IPV6		11
+#define OPT_IPV6_DEVICE			8
+#define OPT_AUTO_SENDTGTS_DISABLE	6
+#define OPT_DISC_SESSION		4
+#define OPT_ENTRY_STATE			3
+	uint16_t exec_throttle;	/* 02-03 */
+	uint16_t exec_count;	/* 04-05 */
+	uint16_t res0;	/* 06-07 */
+	uint16_t iscsi_options;	/* 08-09 */
+#define ISCSIOPT_HEADER_DIGEST_EN		13
+#define ISCSIOPT_DATA_DIGEST_EN			12
+#define ISCSIOPT_IMMEDIATE_DATA_EN		11
+#define ISCSIOPT_INITIAL_R2T_EN			10
+#define ISCSIOPT_DATA_SEQ_IN_ORDER		9
+#define ISCSIOPT_DATA_PDU_IN_ORDER		8
+#define ISCSIOPT_CHAP_AUTH_EN			7
+#define ISCSIOPT_SNACK_REQ_EN			6
+#define ISCSIOPT_DISCOVERY_LOGOUT_EN		5
+#define ISCSIOPT_BIDI_CHAP_EN			4
+#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL	3
+#define ISCSIOPT_ERL1				1
+#define ISCSIOPT_ERL0				0
+
+	uint16_t tcp_options;	/* 0A-0B */
+#define TCPOPT_TIMESTAMP_STAT	6
+#define TCPOPT_NAGLE_DISABLE	5
+#define TCPOPT_WSF_DISABLE	4
+#define TCPOPT_TIMER_SCALE3	3
+#define TCPOPT_TIMER_SCALE2	2
+#define TCPOPT_TIMER_SCALE1	1
+#define TCPOPT_TIMESTAMP_EN	0
+
+	uint16_t ip_options;	/* 0C-0D */
+#define IPOPT_FRAGMENT_DISABLE	4
+
+	uint16_t iscsi_max_rcv_data_seg_len;	/* 0E-0F */
+#define BYTE_UNITS	512
+	uint32_t res1;	/* 10-13 */
+	uint16_t iscsi_max_snd_data_seg_len;	/* 14-15 */
+	uint16_t iscsi_first_burst_len;	/* 16-17 */
+	uint16_t iscsi_def_time2wait;	/* 18-19 */
+	uint16_t iscsi_def_time2retain;	/* 1A-1B */
+	uint16_t iscsi_max_outsnd_r2t;	/* 1C-1D */
+	uint16_t ka_timeout;	/* 1E-1F */
+	uint8_t isid[6];	/* 20-25 big-endian, must be converted
+				 * to little-endian */
+	uint16_t tsid;		/* 26-27 */
+	uint16_t port;	/* 28-29 */
+	uint16_t iscsi_max_burst_len;	/* 2A-2B */
+	uint16_t def_timeout;	/* 2C-2D */
+	uint16_t res2;	/* 2E-2F */
+	uint8_t ip_addr[0x10];	/* 30-3F */
+	uint8_t iscsi_alias[0x20];	/* 40-5F */
+	uint8_t tgt_addr[0x20];	/* 60-7F */
+	uint16_t mss;	/* 80-81 */
+	uint16_t res3;	/* 82-83 */
+	uint16_t lcl_port;	/* 84-85 */
+	uint8_t ipv4_tos;	/* 86 */
+	uint16_t ipv6_flow_lbl;	/* 87-89 */
+	uint8_t res4[0x36];	/* 8A-BF */
+	uint8_t iscsi_name[0xE0];	/* C0-19F : FIXME: make this a
+					 * pointer to a string so we
+					 * don't have to reserve so
+					 * much RAM */
+	uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
+	uint8_t res5[0x10];	/* 1B0-1BF */
+#define DDB_NO_LINK	0xFFFF
+#define DDB_ISNS	0xFFFD
+	uint16_t ddb_link;	/* 1C0-1C1 */
+	uint16_t chap_tbl_idx;	/* 1C2-1C3 */
+	uint16_t tgt_portal_grp; /* 1C4-1C5 */
+	uint8_t tcp_xmt_wsf;	/* 1C6 */
+	uint8_t tcp_rcv_wsf;	/* 1C7 */
+	uint32_t stat_sn;	/* 1C8-1CB */
+	uint32_t exp_stat_sn;	/* 1CC-1CF */
+	uint8_t res6[0x2b];	/* 1D0-1FB */
+#define DDB_VALID_COOKIE	0x9034
+	uint16_t cookie;	/* 1FC-1FD */
+	uint16_t len;		/* 1FE-1FF */
+};
+
+/*************************************************************************/
+
+/* Flash definitions */
+
+#define FLASH_OFFSET_SYS_INFO	0x02000000
+#define FLASH_DEFAULTBLOCKSIZE	0x20000
+#define FLASH_EOF_OFFSET	(FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
+							    * for EOF
+							    * signature */
+#define FLASH_RAW_ACCESS_ADDR	0x8e000000
+
+#define BOOT_PARAM_OFFSET_PORT0 0x3b0
+#define BOOT_PARAM_OFFSET_PORT1 0x7b0
+
+#define FLASH_OFFSET_DB_INFO	0x05000000
+#define FLASH_OFFSET_DB_END	(FLASH_OFFSET_DB_INFO + 0x7fff)
+
+
+struct sys_info_phys_addr {
+	uint8_t address[6];	/* 00-05 */
+	uint8_t filler[2];	/* 06-07 */
+};
+
+struct flash_sys_info {
+	uint32_t cookie;	/* 00-03 */
+	uint32_t physAddrCount; /* 04-07 */
+	struct sys_info_phys_addr physAddr[4]; /* 08-27 */
+	uint8_t vendorId[128];	/* 28-A7 */
+	uint8_t productId[128]; /* A8-127 */
+	uint32_t serialNumber;	/* 128-12B */
+
+	/*  PCI Configuration values */
+	uint32_t pciDeviceVendor;	/* 12C-12F */
+	uint32_t pciDeviceId;	/* 130-133 */
+	uint32_t pciSubsysVendor;	/* 134-137 */
+	uint32_t pciSubsysId;	/* 138-13B */
+
+	/*  This validates version 1. */
+	uint32_t crumbs;	/* 13C-13F */
+
+	uint32_t enterpriseNumber;	/* 140-143 */
+
+	uint32_t mtu;		/* 144-147 */
+	uint32_t reserved0;	/* 148-14b */
+	uint32_t crumbs2;	/* 14c-14f */
+	uint8_t acSerialNumber[16];	/* 150-15f */
+	uint32_t crumbs3;	/* 160-16f */
+
+	/* Leave this last in the struct so it is declared invalid if
+	 * any new items are added.
+	 */
+	uint32_t reserved1[39]; /* 170-1ff */
+};	/* 200 */
+
+struct mbx_sys_info {
+	uint8_t board_id_str[16];   /* 0-f: keep board ID string first
+				     * in this structure for GUI */
+	uint16_t board_id;	/* 10-11 board ID code */
+	uint16_t phys_port_cnt;	/* 12-13 number of physical network ports */
+	uint16_t port_num;	/* 14-15 network port for this PCI function */
+				/* (port 0 is first port) */
+	uint8_t mac_addr[6];	/* 16-1b MAC address for this PCI function */
+	uint32_t iscsi_pci_func_cnt;  /* 1c-1f number of iSCSI PCI functions */
+	uint32_t pci_func;	      /* 20-23 this PCI function */
+	unsigned char serial_number[16];  /* 24-33 serial number string */
+	uint8_t reserved[12];		  /* 34-3f */
+};
+
+struct about_fw_info {
+	uint16_t fw_major;		/* 00 - 01 */
+	uint16_t fw_minor;		/* 02 - 03 */
+	uint16_t fw_patch;		/* 04 - 05 */
+	uint16_t fw_build;		/* 06 - 07 */
+	uint8_t fw_build_date[16];	/* 08 - 17 ASCII String */
+	uint8_t fw_build_time[16];	/* 18 - 27 ASCII String */
+	uint8_t fw_build_user[16];	/* 28 - 37 ASCII String */
+	uint16_t fw_load_source;	/* 38 - 39 */
+					/* 1 = Flash Primary,
+					   2 = Flash Secondary,
+					   3 = Host Download
+					*/
+	uint8_t reserved1[6];		/* 3A - 3F */
+	uint16_t iscsi_major;		/* 40 - 41 */
+	uint16_t iscsi_minor;		/* 42 - 43 */
+	uint16_t bootload_major;	/* 44 - 45 */
+	uint16_t bootload_minor;	/* 46 - 47 */
+	uint16_t bootload_patch;	/* 48 - 49 */
+	uint16_t bootload_build;	/* 4A - 4B */
+	uint8_t extended_timestamp[180];/* 4C - FF */
+};
+
+struct crash_record {
+	uint16_t fw_major_version;	/* 00 - 01 */
+	uint16_t fw_minor_version;	/* 02 - 03 */
+	uint16_t fw_patch_version;	/* 04 - 05 */
+	uint16_t fw_build_version;	/* 06 - 07 */
+
+	uint8_t build_date[16]; /* 08 - 17 */
+	uint8_t build_time[16]; /* 18 - 27 */
+	uint8_t build_user[16]; /* 28 - 37 */
+	uint8_t card_serial_num[16];	/* 38 - 47 */
+
+	uint32_t time_of_crash_in_secs; /* 48 - 4B */
+	uint32_t time_of_crash_in_ms;	/* 4C - 4F */
+
+	uint16_t out_RISC_sd_num_frames;	/* 50 - 51 */
+	uint16_t OAP_sd_num_words;	/* 52 - 53 */
+	uint16_t IAP_sd_num_frames;	/* 54 - 55 */
+	uint16_t in_RISC_sd_num_words;	/* 56 - 57 */
+
+	uint8_t reserved1[28];	/* 58 - 7F */
+
+	uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
+	uint8_t in_RISC_reg_dump[256];	/*180 -27F */
+	uint8_t in_out_RISC_stack_dump[0];	/*280 - ??? */
+};
+
+struct conn_event_log_entry {
+#define MAX_CONN_EVENT_LOG_ENTRIES	100
+	uint32_t timestamp_sec; /* 00 - 03 seconds since boot */
+	uint32_t timestamp_ms;	/* 04 - 07 milliseconds since boot */
+	uint16_t device_index;	/* 08 - 09  */
+	uint16_t fw_conn_state; /* 0A - 0B  */
+	uint8_t event_type;	/* 0C - 0C  */
+	uint8_t error_code;	/* 0D - 0D  */
+	uint16_t error_code_detail;	/* 0E - 0F  */
+	uint8_t num_consecutive_events; /* 10 - 10  */
+	uint8_t rsvd[3];	/* 11 - 13  */
+};
+
+/*************************************************************************
+ *
+ *				IOCB Commands Structures and Definitions
+ *
+ *************************************************************************/
+#define IOCB_MAX_CDB_LEN	    16	/* Bytes in a CDB */
+#define IOCB_MAX_SENSEDATA_LEN	    32	/* Bytes of sense data */
+#define IOCB_MAX_EXT_SENSEDATA_LEN  60  /* Bytes of extended sense data */
+
+/* IOCB header structure */
+struct qla4_header {
+	uint8_t entryType;
+#define ET_STATUS		 0x03
+#define ET_MARKER		 0x04
+#define ET_CONT_T1		 0x0A
+#define ET_STATUS_CONTINUATION	 0x10
+#define ET_CMND_T3		 0x19
+#define ET_PASSTHRU0		 0x3A
+#define ET_PASSTHRU_STATUS	 0x3C
+#define ET_MBOX_CMD		0x38
+#define ET_MBOX_STATUS		0x39
+
+	uint8_t entryStatus;
+	uint8_t systemDefined;
+#define SD_ISCSI_PDU	0x01	/* systemDefined definition */
+	uint8_t entryCount;
+};
+
+/* Generic queue entry structure*/
+struct queue_entry {
+	uint8_t data[60];
+	uint32_t signature;
+};
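+
+/*
+ * Every IOCB is exactly 64 bytes (60 bytes of data plus a 4-byte
+ * signature); the request and response rings are contiguous arrays of
+ * these fixed-size slots, which is why simple in/out indices suffice
+ * to track them.
+ */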
+
+/* 64 bit addressing segment counts*/
+
+#define COMMAND_SEG_A64	  1
+#define CONTINUE_SEG_A64  5
+
+/* 64 bit addressing segment definition*/
+
+struct data_seg_a64 {
+	struct {
+		uint32_t addrLow;
+		uint32_t addrHigh;
+	} base;
+
+	uint32_t count;
+};
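+
+/*
+ * Worked example (illustrative): a command IOCB embeds
+ * COMMAND_SEG_A64 (1) data segment and each continuation IOCB adds
+ * CONTINUE_SEG_A64 (5) more, so an 11-segment scatter/gather I/O
+ * needs the command entry plus two continuation entries:
+ * 1 + 2 * 5 = 11.
+ */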
+
+/* Command Type 3 entry structure*/
+
+struct command_t3_entry {
+	struct qla4_header hdr;	/* 00-03 */
+
+	uint32_t handle;	/* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t connection_id; /* 0A-0B */
+
+	uint8_t control_flags;	/* 0C */
+
+	/* data direction  (bits 5-6) */
+#define CF_WRITE		0x20
+#define CF_READ			0x40
+#define CF_NO_DATA		0x00
+
+	/* task attributes (bits 2-0) */
+#define CF_HEAD_TAG		0x03
+#define CF_ORDERED_TAG		0x02
+#define CF_SIMPLE_TAG		0x01
+
+	/* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS
+	 * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS
+	 * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET
+	 * PROPERLY.
+	 */
+	uint8_t state_flags;	/* 0D */
+	uint8_t cmdRefNum;	/* 0E */
+	uint8_t reserved1;	/* 0F */
+	uint8_t cdb[IOCB_MAX_CDB_LEN];	/* 10-1F */
+	struct scsi_lun lun;	/* FCP LUN (BE). */
+	uint32_t cmdSeqNum;	/* 28-2B */
+	uint16_t timeout;	/* 2C-2D */
+	uint16_t dataSegCnt;	/* 2E-2F */
+	uint32_t ttlByteCnt;	/* 30-33 */
+	struct data_seg_a64 dataseg[COMMAND_SEG_A64];	/* 34-3F */
+
+};
+
+
+/* Continuation Type 1 entry structure*/
+struct continuation_t1_entry {
+	struct qla4_header hdr;
+
+	struct data_seg_a64 dataseg[CONTINUE_SEG_A64];
+
+};
+
+/* Parameterize for 64 or 32 bits */
+#define COMMAND_SEG	COMMAND_SEG_A64
+#define CONTINUE_SEG	CONTINUE_SEG_A64
+
+#define ET_COMMAND	ET_CMND_T3
+#define ET_CONTINUE	ET_CONT_T1
+
+/* Marker entry structure*/
+struct qla4_marker_entry {
+	struct qla4_header hdr;	/* 00-03 */
+
+	uint32_t system_defined; /* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t modifier;	/* 0A-0B */
+#define MM_LUN_RESET		0
+#define MM_TGT_WARM_RESET	1
+
+	uint16_t flags;		/* 0C-0D */
+	uint16_t reserved1;	/* 0E-0F */
+	struct scsi_lun lun;	/* FCP LUN (BE). */
+	uint64_t reserved2;	/* 18-1F */
+	uint64_t reserved3;	/* 20-27 */
+	uint64_t reserved4;	/* 28-2F */
+	uint64_t reserved5;	/* 30-37 */
+	uint64_t reserved6;	/* 38-3F */
+};
+
+/* Status entry structure*/
+struct status_entry {
+	struct qla4_header hdr;	/* 00-03 */
+
+	uint32_t handle;	/* 04-07 */
+
+	uint8_t scsiStatus;	/* 08 */
+#define SCSI_CHECK_CONDITION		  0x02
+
+	uint8_t iscsiFlags;	/* 09 */
+#define ISCSI_FLAG_RESIDUAL_UNDER	  0x02
+#define ISCSI_FLAG_RESIDUAL_OVER	  0x04
+
+	uint8_t iscsiResponse;	/* 0A */
+
+	uint8_t completionStatus;	/* 0B */
+#define SCS_COMPLETE			  0x00
+#define SCS_INCOMPLETE			  0x01
+#define SCS_RESET_OCCURRED		  0x04
+#define SCS_ABORTED			  0x05
+#define SCS_TIMEOUT			  0x06
+#define SCS_DATA_OVERRUN		  0x07
+#define SCS_DATA_UNDERRUN		  0x15
+#define SCS_QUEUE_FULL			  0x1C
+#define SCS_DEVICE_UNAVAILABLE		  0x28
+#define SCS_DEVICE_LOGGED_OUT		  0x29
+
+	uint8_t reserved1;	/* 0C */
+
+	/* state_flags MUST be at the same location as state_flags in
+	 * the Command_T3/4_Entry */
+	uint8_t state_flags;	/* 0D */
+
+	uint16_t senseDataByteCnt;	/* 0E-0F */
+	uint32_t residualByteCnt;	/* 10-13 */
+	uint32_t bidiResidualByteCnt;	/* 14-17 */
+	uint32_t expSeqNum;	/* 18-1B */
+	uint32_t maxCmdSeqNum;	/* 1C-1F */
+	uint8_t senseData[IOCB_MAX_SENSEDATA_LEN];	/* 20-3F */
+
+};
+
+/* Status Continuation entry */
+struct status_cont_entry {
+       struct qla4_header hdr; /* 00-03 */
+       uint8_t ext_sense_data[IOCB_MAX_EXT_SENSEDATA_LEN]; /* 04-63 */
+};
+
+struct passthru0 {
+	struct qla4_header hdr;		       /* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t connection_id;	/* 0A-0B */
+#define ISNS_DEFAULT_SERVER_CONN_ID	((uint16_t)0x8000)
+
+	uint16_t control_flags;	/* 0C-0D */
+#define PT_FLAG_ETHERNET_FRAME		0x8000
+#define PT_FLAG_ISNS_PDU		0x8000
+#define PT_FLAG_SEND_BUFFER		0x0200
+#define PT_FLAG_WAIT_4_RESPONSE		0x0100
+#define PT_FLAG_ISCSI_PDU		0x1000
+
+	uint16_t timeout;	/* 0E-0F */
+#define PT_DEFAULT_TIMEOUT		30 /* seconds */
+
+	struct data_seg_a64 out_dsd;    /* 10-1B */
+	uint32_t res1;		/* 1C-1F */
+	struct data_seg_a64 in_dsd;     /* 20-2B */
+	uint8_t res2[20];	/* 2C-3F */
+};
+
+struct passthru_status {
+	struct qla4_header hdr;		       /* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint16_t target;	/* 08-09 */
+	uint16_t connectionID;	/* 0A-0B */
+
+	uint8_t completionStatus;	/* 0C */
+#define PASSTHRU_STATUS_COMPLETE		0x01
+
+	uint8_t residualFlags;	/* 0D */
+
+	uint16_t timeout;	/* 0E-0F */
+	uint16_t portNumber;	/* 10-11 */
+	uint8_t res1[10];	/* 12-1B */
+	uint32_t outResidual;	/* 1C-1F */
+	uint8_t res2[12];	/* 20-2B */
+	uint32_t inResidual;	/* 2C-2F */
+	uint8_t res4[16];	/* 30-3F */
+};
+
+struct mbox_cmd_iocb {
+	struct qla4_header hdr;	/* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint32_t in_mbox[8];	/* 08-27 */
+	uint32_t res1[6];	/* 28-3F */
+};
+
+struct mbox_status_iocb {
+	struct qla4_header hdr;	/* 00-03 */
+	uint32_t handle;	/* 04-07 */
+	uint32_t out_mbox[8];	/* 08-27 */
+	uint32_t res1[6];	/* 28-3F */
+};
+
+/*
+ * ISP queue - response queue entry definition.
+ */
+struct response {
+	uint8_t data[60];
+	uint32_t signature;
+#define RESPONSE_PROCESSED	0xDEADDEAD	/* Signature */
+};
+
+struct ql_iscsi_stats {
+	uint64_t mac_tx_frames; /* 0000-0007 */
+	uint64_t mac_tx_bytes; /* 0008-000F */
+	uint64_t mac_tx_multicast_frames; /* 0010-0017 */
+	uint64_t mac_tx_broadcast_frames; /* 0018-001F */
+	uint64_t mac_tx_pause_frames; /* 0020-0027 */
+	uint64_t mac_tx_control_frames; /* 0028-002F */
+	uint64_t mac_tx_deferral; /* 0030-0037 */
+	uint64_t mac_tx_excess_deferral; /* 0038-003F */
+	uint64_t mac_tx_late_collision; /* 0040-0047 */
+	uint64_t mac_tx_abort; /* 0048-004F */
+	uint64_t mac_tx_single_collision; /* 0050-0057 */
+	uint64_t mac_tx_multiple_collision; /* 0058-005F */
+	uint64_t mac_tx_collision; /* 0060-0067 */
+	uint64_t mac_tx_frames_dropped; /* 0068-006F */
+	uint64_t mac_tx_jumbo_frames; /* 0070-0077 */
+	uint64_t mac_rx_frames; /* 0078-007F */
+	uint64_t mac_rx_bytes; /* 0080-0087 */
+	uint64_t mac_rx_unknown_control_frames; /* 0088-008F */
+	uint64_t mac_rx_pause_frames; /* 0090-0097 */
+	uint64_t mac_rx_control_frames; /* 0098-009F */
+	uint64_t mac_rx_dribble; /* 00A0-00A7 */
+	uint64_t mac_rx_frame_length_error; /* 00A8-00AF */
+	uint64_t mac_rx_jabber; /* 00B0-00B7 */
+	uint64_t mac_rx_carrier_sense_error; /* 00B8-00BF */
+	uint64_t mac_rx_frame_discarded; /* 00C0-00C7 */
+	uint64_t mac_rx_frames_dropped; /* 00C8-00CF */
+	uint64_t mac_crc_error; /* 00D0-00D7 */
+	uint64_t mac_encoding_error; /* 00D8-00DF */
+	uint64_t mac_rx_length_error_large; /* 00E0-00E7 */
+	uint64_t mac_rx_length_error_small; /* 00E8-00EF */
+	uint64_t mac_rx_multicast_frames; /* 00F0-00F7 */
+	uint64_t mac_rx_broadcast_frames; /* 00F8-00FF */
+	uint64_t ip_tx_packets; /* 0100-0107 */
+	uint64_t ip_tx_bytes; /* 0108-010F */
+	uint64_t ip_tx_fragments; /* 0110-0117 */
+	uint64_t ip_rx_packets; /* 0118-011F */
+	uint64_t ip_rx_bytes; /* 0120-0127 */
+	uint64_t ip_rx_fragments; /* 0128-012F */
+	uint64_t ip_datagram_reassembly; /* 0130-0137 */
+	uint64_t ip_invalid_address_error; /* 0138-013F */
+	uint64_t ip_error_packets; /* 0140-0147 */
+	uint64_t ip_fragrx_overlap; /* 0148-014F */
+	uint64_t ip_fragrx_outoforder; /* 0150-0157 */
+	uint64_t ip_datagram_reassembly_timeout; /* 0158-015F */
+	uint64_t ipv6_tx_packets; /* 0160-0167 */
+	uint64_t ipv6_tx_bytes; /* 0168-016F */
+	uint64_t ipv6_tx_fragments; /* 0170-0177 */
+	uint64_t ipv6_rx_packets; /* 0178-017F */
+	uint64_t ipv6_rx_bytes; /* 0180-0187 */
+	uint64_t ipv6_rx_fragments; /* 0188-018F */
+	uint64_t ipv6_datagram_reassembly; /* 0190-0197 */
+	uint64_t ipv6_invalid_address_error; /* 0198-019F */
+	uint64_t ipv6_error_packets; /* 01A0-01A7 */
+	uint64_t ipv6_fragrx_overlap; /* 01A8-01AF */
+	uint64_t ipv6_fragrx_outoforder; /* 01B0-01B7 */
+	uint64_t ipv6_datagram_reassembly_timeout; /* 01B8-01BF */
+	uint64_t tcp_tx_segments; /* 01C0-01C7 */
+	uint64_t tcp_tx_bytes; /* 01C8-01CF */
+	uint64_t tcp_rx_segments; /* 01D0-01D7 */
+	uint64_t tcp_rx_byte; /* 01D8-01DF */
+	uint64_t tcp_duplicate_ack_retx; /* 01E0-01E7 */
+	uint64_t tcp_retx_timer_expired; /* 01E8-01EF */
+	uint64_t tcp_rx_duplicate_ack; /* 01F0-01F7 */
+	uint64_t tcp_rx_pure_ackr; /* 01F8-01FF */
+	uint64_t tcp_tx_delayed_ack; /* 0200-0207 */
+	uint64_t tcp_tx_pure_ack; /* 0208-020F */
+	uint64_t tcp_rx_segment_error; /* 0210-0217 */
+	uint64_t tcp_rx_segment_outoforder; /* 0218-021F */
+	uint64_t tcp_rx_window_probe; /* 0220-0227 */
+	uint64_t tcp_rx_window_update; /* 0228-022F */
+	uint64_t tcp_tx_window_probe_persist; /* 0230-0237 */
+	uint64_t ecc_error_correction; /* 0238-023F */
+	uint64_t iscsi_pdu_tx; /* 0240-0247 */
+	uint64_t iscsi_data_bytes_tx; /* 0248-024F */
+	uint64_t iscsi_pdu_rx; /* 0250-0257 */
+	uint64_t iscsi_data_bytes_rx; /* 0258-025F */
+	uint64_t iscsi_io_completed; /* 0260-0267 */
+	uint64_t iscsi_unexpected_io_rx; /* 0268-026F */
+	uint64_t iscsi_format_error; /* 0270-0277 */
+	uint64_t iscsi_hdr_digest_error; /* 0278-027F */
+	uint64_t iscsi_data_digest_error; /* 0280-0287 */
+	uint64_t iscsi_sequence_error; /* 0288-028F */
+	uint32_t tx_cmd_pdu; /* 0290-0293 */
+	uint32_t tx_resp_pdu; /* 0294-0297 */
+	uint32_t rx_cmd_pdu; /* 0298-029B */
+	uint32_t rx_resp_pdu; /* 029C-029F */
+
+	uint64_t tx_data_octets; /* 02A0-02A7 */
+	uint64_t rx_data_octets; /* 02A8-02AF */
+
+	uint32_t hdr_digest_err; /* 02B0-02B3 */
+	uint32_t data_digest_err; /* 02B4-02B7 */
+	uint32_t conn_timeout_err; /* 02B8-02BB */
+	uint32_t framing_err; /* 02BC-02BF */
+
+	uint32_t tx_nopout_pdus; /* 02C0-02C3 */
+	uint32_t tx_scsi_cmd_pdus; /* 02C4-02C7 */
+	uint32_t tx_tmf_cmd_pdus; /* 02C8-02CB */
+	uint32_t tx_login_cmd_pdus; /* 02CC-02CF */
+	uint32_t tx_text_cmd_pdus; /* 02D0-02D3 */
+	uint32_t tx_scsi_write_pdus; /* 02D4-02D7 */
+	uint32_t tx_logout_cmd_pdus; /* 02D8-02DB */
+	uint32_t tx_snack_req_pdus; /* 02DC-02DF */
+
+	uint32_t rx_nopin_pdus; /* 02E0-02E3 */
+	uint32_t rx_scsi_resp_pdus; /* 02E4-02E7 */
+	uint32_t rx_tmf_resp_pdus; /* 02E8-02EB */
+	uint32_t rx_login_resp_pdus; /* 02EC-02EF */
+	uint32_t rx_text_resp_pdus; /* 02F0-02F3 */
+	uint32_t rx_scsi_read_pdus; /* 02F4-02F7 */
+	uint32_t rx_logout_resp_pdus; /* 02F8-02FB */
+
+	uint32_t rx_r2t_pdus; /* 02FC-02FF */
+	uint32_t rx_async_pdus; /* 0300-0303 */
+	uint32_t rx_reject_pdus; /* 0304-0307 */
+
+	uint8_t reserved2[264]; /* 0x0308 - 0x040F */
+};
+
+#define QLA8XXX_DBG_STATE_ARRAY_LEN		16
+#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN		8
+#define QLA8XXX_DBG_RSVD_ARRAY_LEN		8
+#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN	16
+#define QLA83XX_SS_OCM_WNDREG_INDEX		3
+#define QLA83XX_SS_PCI_INDEX			0
+#define QLA8022_TEMPLATE_CAP_OFFSET		172
+#define QLA83XX_TEMPLATE_CAP_OFFSET		268
+#define QLA80XX_TEMPLATE_RESERVED_BITS		16
+
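+/*
+ * Minidump capture template header, retrieved from the firmware.
+ * qla4xxx_alloc_fw_dump() sums the capture_size_array[] entries
+ * selected by the capture mask to size the dump buffer.
+ */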
+struct qla4_8xxx_minidump_template_hdr {
+	uint32_t entry_type;
+	uint32_t first_entry_offset;
+	uint32_t size_of_template;
+	uint32_t capture_debug_level;
+	uint32_t num_of_entries;
+	uint32_t version;
+	uint32_t driver_timestamp;
+	uint32_t checksum;
+
+	uint32_t driver_capture_mask;
+	uint32_t driver_info_word2;
+	uint32_t driver_info_word3;
+	uint32_t driver_info_word4;
+
+	uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
+	uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
+	uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
+	uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];
+};
+
+#endif /* _QLA4X_FW_H */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_glbl.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_glbl.h
new file mode 100644
index 0000000..bce96a5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -0,0 +1,292 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA4x_GBL_H
+#define	__QLA4x_GBL_H
+
+struct iscsi_cls_conn;
+
+int qla4xxx_hw_reset(struct scsi_qla_host *ha);
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
+int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
+int qla4xxx_soft_reset(struct scsi_qla_host *ha);
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
+
+void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
+void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
+
+int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
+int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
+		      uint64_t lun);
+int qla4xxx_reset_target(struct scsi_qla_host *ha,
+			 struct ddb_entry *ddb_entry);
+int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t len);
+int qla4xxx_get_firmware_status(struct scsi_qla_host *ha);
+int qla4xxx_get_firmware_state(struct scsi_qla_host *ha);
+int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha);
+
+/* FIXME: Goodness!  this really wants a small struct to hold the
+ * parameters. On x86 the args will get passed on the stack! */
+int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
+			    uint16_t fw_ddb_index,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma,
+			    uint32_t *num_valid_ddb_entries,
+			    uint32_t *next_ddb_index,
+			    uint32_t *fw_ddb_device_state,
+			    uint32_t *conn_err_detail,
+			    uint16_t *tcp_source_port_num,
+			    uint16_t *connection_id);
+
+int qla4xxx_set_ddb_entry(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+			  dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts);
+uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+			 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma);
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+				   uint16_t fw_ddb_index,
+				   uint16_t connection_id,
+				   uint16_t option);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+		    uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+		    uint32_t acb_type, uint32_t len);
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+			 uint32_t ip_idx, uint32_t *sts);
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session);
+u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset);
+void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
+int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
+int qla4xxx_about_firmware(struct scsi_qla_host *ha);
+void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
+				       uint32_t intr_status);
+int qla4xxx_init_rings(struct scsi_qla_host *ha);
+void qla4xxx_srb_compl(struct kref *ref);
+struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
+		uint32_t index);
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+		uint32_t state, uint32_t conn_error);
+void qla4xxx_dump_buffer(void *b, uint32_t size);
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+	struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod);
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t length, uint32_t options);
+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
+		uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+			   char *password, int bidi, uint16_t *chap_index);
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+		     uint16_t idx, int bidi);
+
+void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
+void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
+int qla4xxx_get_sys_info(struct scsi_qla_host *ha);
+int qla4xxx_iospace_config(struct scsi_qla_host *ha);
+void qla4xxx_pci_config(struct scsi_qla_host *ha);
+int qla4xxx_start_firmware(struct scsi_qla_host *ha);
+uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+int qla4xxx_request_irqs(struct scsi_qla_host *ha);
+void qla4xxx_free_irqs(struct scsi_qla_host *ha);
+void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
+void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
+void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
+void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
+void qla4xxx_dump_registers(struct scsi_qla_host *ha);
+uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+				  uint32_t *mbox_cmd,
+				  uint32_t *mbox_sts,
+				  struct addr_ctrl_blk *init_fw_cb,
+				  dma_addr_t init_fw_cb_dma);
+
+void qla4_8xxx_pci_config(struct scsi_qla_host *);
+int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
+int qla4_8xxx_load_risc(struct scsi_qla_host *);
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
+
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
+int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
+void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
+int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
+int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
+int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
+		uint32_t intr_status);
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
+int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
+void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
+int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
+int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
+void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
+void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
+int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
+irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
+irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
+irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
+void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
+void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
+int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
+void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
+void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
+void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index);
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry,
+			       struct iscsi_cls_conn *cls_conn,
+			       uint32_t *mbx_sts);
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry, int options);
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			  uint32_t *mbx_sts);
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
+int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+			  uint16_t stats_size, dma_addr_t stats_dma);
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+				       struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+					     struct ddb_entry *ddb_entry);
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username,
+		     char *password, uint16_t idx);
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size);
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size);
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+				     uint32_t region, uint32_t field0,
+				     uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			     struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+		       struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+			  enum iscsi_host_event_code aen_code,
+			  uint32_t data_size, uint8_t *data);
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+			       uint32_t status, uint32_t pid,
+			       uint32_t data_size, uint8_t *data);
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+			     struct dev_db_entry *fw_ddb_entry,
+			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+
+/* BSG Functions */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job);
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+				  dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
+int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+			      int incount);
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+			    int incount);
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+					 uint32_t intr_status);
+int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
+uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
+void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
+int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+			      uint32_t *data);
+int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
+			      uint32_t data);
+int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
+void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
+void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
+void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+			      int incount);
+void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
+void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
+void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
+int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
+				      uint32_t flash_addr, uint8_t *p_data,
+				      int u32_word_count);
+void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
+void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
+int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
+			     uint8_t *p_data, int u32_word_count);
+void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
+void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+			    dma_addr_t dma_addr);
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+				  char *password, uint16_t chap_index);
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
+				uint64_t addr, uint32_t *data, uint32_t count);
+uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
+int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
+int qla4_83xx_is_detached(struct scsi_qla_host *ha);
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
+
+extern int ql4xextended_error_logging;
+extern int ql4xdontresethba;
+extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
+
+extern struct device_attribute *qla4xxx_host_attrs[];
+#endif /* __QLA4x_GBL_H */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_init.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_init.c
new file mode 100644
index 0000000..5d6d158
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_init.c
@@ -0,0 +1,1267 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <scsi/iscsi_if.h>
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
+{
+	uint32_t value;
+	uint8_t func_number;
+	unsigned long flags;
+
+	/* Get the function number */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	value = readw(&ha->reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	func_number = (uint8_t) ((value >> 4) & 0x30);
+	switch (value & ISP_CONTROL_FN_MASK) {
+	case ISP_CONTROL_FN0_SCSI:
+		ha->mac_index = 1;
+		break;
+	case ISP_CONTROL_FN1_SCSI:
+		ha->mac_index = 3;
+		break;
+	default:
+		DEBUG2(printk("scsi%ld: %s: Invalid function number, "
+			      "ispControlStatus = 0x%x\n", ha->host_no,
+			      __func__, value));
+		break;
+	}
+	DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
+		      ha->mac_index));
+}
+
+/**
+ * qla4xxx_free_ddb - deallocate ddb
+ * @ha: pointer to host adapter structure.
+ * @ddb_entry: pointer to device database entry
+ *
+ * This routine marks a DDB entry INVALID
+ **/
+void qla4xxx_free_ddb(struct scsi_qla_host *ha,
+    struct ddb_entry *ddb_entry)
+{
+	/* Remove device pointer from index mapping arrays */
+	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
+		(struct ddb_entry *) INVALID_ENTRY;
+	ha->tot_ddbs--;
+}
+
+/**
+ * qla4xxx_init_response_q_entries() - Initializes response queue entries.
+ * @ha: HA context
+ *
+ * The beginning of the request ring has the initialization control block
+ * already built by the nvram config routine.
+ **/
+static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
+{
+	uint16_t cnt;
+	struct response *pkt;
+
+	pkt = (struct response *)ha->response_ptr;
+	for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) {
+		pkt->signature = RESPONSE_PROCESSED;
+		pkt++;
+	}
+}
+
+/**
+ * qla4xxx_init_rings - initialize hw queues
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine initializes the internal queues for the specified adapter.
+ * The QLA4010 requires us to restart the queues at index 0.
+ * The QLA4000 doesn't care, so just default to QLA4010's requirement.
+ **/
+int qla4xxx_init_rings(struct scsi_qla_host *ha)
+{
+	unsigned long flags = 0;
+	int i;
+
+	/* Initialize request queue. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->request_out = 0;
+	ha->request_in = 0;
+	ha->request_ptr = &ha->request_ring[ha->request_in];
+	ha->req_q_count = REQUEST_QUEUE_DEPTH;
+
+	/* Initialize response queue. */
+	ha->response_in = 0;
+	ha->response_out = 0;
+	ha->response_ptr = &ha->response_ring[ha->response_out];
+
+	if (is_qla8022(ha)) {
+		writel(0,
+		    (unsigned long  __iomem *)&ha->qla4_82xx_reg->req_q_out);
+		writel(0,
+		    (unsigned long  __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
+		writel(0,
+		    (unsigned long  __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		writel(0,
+		       (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
+		writel(0,
+		       (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in);
+		writel(0,
+		       (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out);
+	} else {
+		/*
+		 * Initialize DMA Shadow registers.  The firmware is really
+		 * supposed to take care of this, but on some uniprocessor
+		 * systems, the shadow registers aren't cleared-- causing
+		 * the interrupt_handler to think there are responses to be
+		 * processed when there aren't.
+		 */
+		ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0);
+		ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0);
+		wmb();
+
+		writel(0, &ha->reg->req_q_in);
+		writel(0, &ha->reg->rsp_q_out);
+		readl(&ha->reg->rsp_q_out);
+	}
+
+	qla4xxx_init_response_q_entries(ha);
+
+	/* Initialize mailbox active array */
+	for (i = 0; i < MAX_MRB; i++)
+		ha->active_mrb_array[i] = NULL;
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_sys_info - retrieve adapter MAC address(es) and serial number
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
+{
+	struct flash_sys_info *sys_info;
+	dma_addr_t sys_info_dma;
+	int status = QLA_ERROR;
+
+	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+				      &sys_info_dma, GFP_KERNEL);
+	if (sys_info == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+			      ha->host_no, __func__));
+
+		goto exit_get_sys_info_no_free;
+	}
+	memset(sys_info, 0, sizeof(*sys_info));
+
+	/* Get flash sys info */
+	if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
+			      sizeof(*sys_info)) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO "
+			      "failed\n", ha->host_no, __func__));
+
+		goto exit_get_sys_info;
+	}
+
+	/* Save M.A.C. address & serial_number */
+	memcpy(ha->my_mac, &sys_info->physAddr[0].address[0],
+	       min(sizeof(ha->my_mac),
+		   sizeof(sys_info->physAddr[0].address)));
+	memcpy(ha->serial_number, &sys_info->acSerialNumber,
+	       min(sizeof(ha->serial_number),
+		   sizeof(sys_info->acSerialNumber)));
+
+	status = QLA_SUCCESS;
+
+exit_get_sys_info:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
+			  sys_info_dma);
+
+exit_get_sys_info_no_free:
+	return status;
+}
+
+/**
+ * qla4xxx_init_local_data - initialize adapter-specific local data
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
+{
+	/* Initialize aen queue */
+	ha->aen_q_count = MAX_AEN_ENTRIES;
+}
+
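+/**
+ * qla4xxx_wait_for_ip_config - check whether IP acquisition is pending
+ * @ha: pointer to host adapter structure.
+ *
+ * Returns non-zero if DHCPv4 and/or IPv6 neighbor discovery is still
+ * acquiring an address, i.e. the caller should keep waiting.
+ **/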
+static uint8_t
+qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
+{
+	uint8_t ipv4_wait = 0;
+	uint8_t ipv6_wait = 0;
+	int8_t ip_address[IPv6_ADDR_LEN] = {0};
+
+	/* If both IPv4 & IPv6 are enabled, only one IP address may have
+	 * been acquired so far, so check whether we need to wait for
+	 * another. */
+	if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) {
+		if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) &&
+		    ((ha->addl_fw_state &
+				    FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
+			ipv4_wait = 1;
+		}
+		if (((ha->ip_config.ipv6_addl_options &
+		      IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+		    ((ha->ip_config.ipv6_link_local_state ==
+		      IP_ADDRSTATE_ACQUIRING) ||
+		     (ha->ip_config.ipv6_addr0_state ==
+		      IP_ADDRSTATE_ACQUIRING) ||
+		     (ha->ip_config.ipv6_addr1_state ==
+		      IP_ADDRSTATE_ACQUIRING))) {
+
+			ipv6_wait = 1;
+
+			if ((ha->ip_config.ipv6_link_local_state ==
+			     IP_ADDRSTATE_PREFERRED) ||
+			    (ha->ip_config.ipv6_addr0_state ==
+			     IP_ADDRSTATE_PREFERRED) ||
+			    (ha->ip_config.ipv6_addr1_state ==
+			     IP_ADDRSTATE_PREFERRED)) {
+				DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+					      "Preferred IP configured."
+					      " Don't wait!\n", ha->host_no,
+					      __func__));
+				ipv6_wait = 0;
+			}
+			if (memcmp(&ha->ip_config.ipv6_default_router_addr,
+				   ip_address, IPv6_ADDR_LEN) == 0) {
+				DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+					      "No Router configured. "
+					      "Don't wait!\n", ha->host_no,
+					      __func__));
+				ipv6_wait = 0;
+			}
+			if ((ha->ip_config.ipv6_default_router_state ==
+			     IPV6_RTRSTATE_MANUAL) &&
+			    (ha->ip_config.ipv6_link_local_state ==
+			     IP_ADDRSTATE_TENTATIVE) &&
+			    (memcmp(&ha->ip_config.ipv6_link_local_addr,
+			     &ha->ip_config.ipv6_default_router_addr, 4) ==
+			     0)) {
+				DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
+					"IP configured. Don't wait!\n",
+					ha->host_no, __func__));
+				ipv6_wait = 0;
+			}
+		}
+		if (ipv4_wait || ipv6_wait) {
+			DEBUG2(printk("scsi%ld: %s: Wait for additional "
+				      "IP(s) \"", ha->host_no, __func__));
+			if (ipv4_wait)
+				DEBUG2(printk("IPv4 "));
+			if (ha->ip_config.ipv6_link_local_state ==
+			    IP_ADDRSTATE_ACQUIRING)
+				DEBUG2(printk("IPv6LinkLocal "));
+			if (ha->ip_config.ipv6_addr0_state ==
+			    IP_ADDRSTATE_ACQUIRING)
+				DEBUG2(printk("IPv6Addr0 "));
+			if (ha->ip_config.ipv6_addr1_state ==
+			    IP_ADDRSTATE_ACQUIRING)
+				DEBUG2(printk("IPv6Addr1 "));
+			DEBUG2(printk("\"\n"));
+		}
+	}
+
+	return ipv4_wait | ipv6_wait;
+}
+
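+/**
+ * qla4_80xx_is_minidump_dma_capable - check template for PEX DMA support
+ * @ha: pointer to host adapter structure.
+ * @md_hdr: pointer to the minidump template header
+ *
+ * Returns 1 if BIT_0 of the chip-specific capabilities word in the
+ * template is set, 0 otherwise.
+ **/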
+static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha,
+		struct qla4_8xxx_minidump_template_hdr *md_hdr)
+{
+	int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET :
+					QLA83XX_TEMPLATE_CAP_OFFSET;
+	int rval = 1;
+	uint32_t *cap_offset;
+
+	cap_offset = (uint32_t *)((char *)md_hdr + offset);
+
+	if (!(le32_to_cpu(*cap_offset) & BIT_0)) {
+		ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n",
+			   *cap_offset);
+		rval = 0;
+	}
+
+	return rval;
+}
+
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+	int status;
+	uint32_t capture_debug_level;
+	int hdr_entry_bit, k;
+	void *md_tmp;
+	dma_addr_t md_tmp_dma;
+	struct qla4_8xxx_minidump_template_hdr *md_hdr;
+	int dma_capable;
+
+	if (ha->fw_dump) {
+		ql4_printk(KERN_WARNING, ha,
+			   "Firmware dump previously allocated.\n");
+		return;
+	}
+
+	status = qla4xxx_req_template_size(ha);
+	if (status != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi%ld: Failed to get template size\n",
+			   ha->host_no);
+		return;
+	}
+
+	clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+	/* Allocate memory for saving the template */
+	md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+				    &md_tmp_dma, GFP_KERNEL);
+	if (!md_tmp) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi%ld: Failed to allocate DMA memory\n",
+			   ha->host_no);
+		return;
+	}
+
+	/* Request template */
+	status =  qla4xxx_get_minidump_template(ha, md_tmp_dma);
+	if (status != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi%ld: Failed to get minidump template\n",
+			   ha->host_no);
+		goto alloc_cleanup;
+	}
+
+	md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+	dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr);
+
+	capture_debug_level = md_hdr->capture_debug_level;
+
+	/* Get capture mask based on module loadtime setting. */
+	if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
+	    (ql4xmdcapmask == 0xFF && dma_capable))  {
+		ha->fw_dump_capture_mask = ql4xmdcapmask;
+	} else {
+		if (ql4xmdcapmask == 0xFF)
+			ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");
+		ha->fw_dump_capture_mask = capture_debug_level;
+	}
+
+	md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+			  md_hdr->num_of_entries));
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size  = %d\n",
+			  ha->fw_dump_tmplt_size));
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+			  ha->fw_dump_capture_mask));
+
+	/* Calculate fw_dump_size */
+	for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+	     hdr_entry_bit <<= 1, k++) {
+		if (hdr_entry_bit & ha->fw_dump_capture_mask)
+			ha->fw_dump_size += md_hdr->capture_size_array[k];
+	}
+
+	/* Total firmware dump size including command header */
+	ha->fw_dump_size += ha->fw_dump_tmplt_size;
+	ha->fw_dump = vmalloc(ha->fw_dump_size);
+	if (!ha->fw_dump)
+		goto alloc_cleanup;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Minidump Template Size = 0x%x KB\n",
+			  ha->fw_dump_tmplt_size));
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+	memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+	ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+	dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+			  md_tmp, md_tmp_dma);
+}
+
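+/**
+ * qla4xxx_fw_ready - wait for firmware ready state
+ * @ha: pointer to host adapter structure.
+ *
+ * Polls the firmware state once a second for up to ADAPTER_INIT_TOV
+ * seconds, issuing Initialize Firmware and retrieving DHCP addresses
+ * as needed.  Returns 1 when the firmware is ready enough to proceed,
+ * 0 otherwise.
+ **/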
+static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
+{
+	uint32_t timeout_count;
+	int ready = 0;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n"));
+	for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0;
+	     timeout_count--) {
+		if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
+			qla4xxx_get_dhcp_ip_address(ha);
+
+		/* Get firmware state. */
+		if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) {
+			DEBUG2(printk("scsi%ld: %s: unable to get firmware "
+				      "state\n", ha->host_no, __func__));
+			break;
+		}
+
+		if (ha->firmware_state & FW_STATE_ERROR) {
+			DEBUG2(printk("scsi%ld: %s: an unrecoverable error has"
+				      " occurred\n", ha->host_no, __func__));
+			break;
+
+		}
+		if (ha->firmware_state & FW_STATE_CONFIG_WAIT) {
+			/*
+			 * The firmware has not yet been issued an Initialize
+			 * Firmware command, so issue it now.
+			 */
+			if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR)
+				break;
+
+			/* Go back and test for ready state - no wait. */
+			continue;
+		}
+
+		if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+			DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+				      "AUTOCONNECT in progress\n",
+				      ha->host_no, __func__));
+		}
+
+		if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+			DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:"
+				      " CONFIGURING IP\n",
+				      ha->host_no, __func__));
+			/*
+			 * Check for link state after 15 secs and if link is
+			 * still DOWN then, cable is unplugged. Ignore "DHCP
+			 * in Progress/CONFIGURING IP" bit to check if firmware
+			 * is in ready state or not after 15 secs.
+			 * This is applicable for both 2.x & 3.x firmware
+			 */
+			if (timeout_count <= (ADAPTER_INIT_TOV - 15)) {
+				if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) {
+					DEBUG2(printk(KERN_INFO "scsi%ld: %s:"
+						  " LINK UP (Cable plugged)\n",
+						  ha->host_no, __func__));
+				} else if (ha->firmware_state &
+					  (FW_STATE_CONFIGURING_IP |
+							     FW_STATE_READY)) {
+					DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
+						"LINK DOWN (Cable unplugged)\n",
+						ha->host_no, __func__));
+					ha->firmware_state = FW_STATE_READY;
+				}
+			}
+		}
+
+		if (ha->firmware_state == FW_STATE_READY) {
+			/* If DHCP IP Addr is available, retrieve it now. */
+			if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR,
+								&ha->dpc_flags))
+				qla4xxx_get_dhcp_ip_address(ha);
+
+			if (!qla4xxx_wait_for_ip_config(ha) ||
+							timeout_count == 1) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+				    "Firmware Ready..\n"));
+				/* The firmware is ready to process SCSI
+				   commands. */
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+					"scsi%ld: %s: MEDIA TYPE"
+					" - %s\n", ha->host_no,
+					__func__, (ha->addl_fw_state &
+					FW_ADDSTATE_OPTICAL_MEDIA)
+					!= 0 ? "OPTICAL" : "COPPER"));
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+					"scsi%ld: %s: DHCPv4 STATE"
+					" Enabled %s\n", ha->host_no,
+					 __func__, (ha->addl_fw_state &
+					 FW_ADDSTATE_DHCPv4_ENABLED) != 0 ?
+					"YES" : "NO"));
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+					"scsi%ld: %s: LINK %s\n",
+					ha->host_no, __func__,
+					(ha->addl_fw_state &
+					 FW_ADDSTATE_LINK_UP) != 0 ?
+					"UP" : "DOWN"));
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+					"scsi%ld: %s: iSNS Service "
+					"Started %s\n",
+					ha->host_no, __func__,
+					(ha->addl_fw_state &
+					 FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ?
+					"YES" : "NO"));
+
+				ready = 1;
+				break;
+			}
+		}
+		DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - "
+			      "seconds expired= %d\n", ha->host_no, __func__,
+			      ha->firmware_state, ha->addl_fw_state,
+			      timeout_count));
+		if (is_qla4032(ha) &&
+			!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
+			(timeout_count < ADAPTER_INIT_TOV - 5)) {
+			break;
+		}
+
+		msleep(1000);
+	}			/* end of for */
+
+	if (timeout_count <= 0)
+		DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
+			      ha->host_no, __func__));
+
+	if (ha->firmware_state & FW_STATE_CONFIGURING_IP) {
+		DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting "
+			      "it's waiting to configure an IP address\n",
+			       ha->host_no, __func__));
+		ready = 1;
+	} else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) {
+		DEBUG2(printk("scsi%ld: %s: FW initialized, but "
+			      "auto-discovery still in process\n",
+			       ha->host_no, __func__));
+		ready = 1;
+	}
+
+	return ready;
+}
+
+/**
+ * qla4xxx_init_firmware - initializes the firmware.
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
+{
+	int status = QLA_ERROR;
+
+	if (is_aer_supported(ha) &&
+	    test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
+		return status;
+
+	/* For 82xx, stop firmware before initializing because if BIOS
+	 * has previously initialized firmware, then driver's initialize
+	 * firmware will fail. */
+	if (is_qla80XX(ha))
+		qla4_8xxx_stop_firmware(ha);
+
+	ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
+	if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware "
+			      "control block\n", ha->host_no, __func__));
+		return status;
+	}
+
+	if (!qla4xxx_fw_ready(ha))
+		return status;
+
+	if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+		qla4xxx_alloc_fw_dump(ha);
+
+	return qla4xxx_get_firmware_status(ha);
+}
+
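+/**
+ * qla4xxx_set_model_info - read model name from NVRAM
+ * @ha: pointer to host adapter structure.
+ *
+ * Copies the ISP4022/4032 board ID string out of NVRAM one word at a
+ * time into ha->model_name.
+ **/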
+static void qla4xxx_set_model_info(struct scsi_qla_host *ha)
+{
+	uint16_t board_id_string[8];
+	int i;
+	int size = sizeof(ha->nvram->isp4022.boardIdStr);
+	int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2;
+
+	for (i = 0; i < (size / 2); i++) {
+		board_id_string[i] = rd_nvram_word(ha, offset);
+		offset += 1;
+	}
+
+	memcpy(ha->model_name, board_id_string, size);
+}
+
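+/**
+ * qla4xxx_config_nvram - load external hardware config from NVRAM
+ * @ha: pointer to host adapter structure.
+ *
+ * Validates the EEPROM contents and programs the external hardware
+ * configuration register, falling back to chip-specific defaults when
+ * the NVRAM checksum is invalid.
+ **/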
+static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+	union external_hw_config_reg extHwConfig;
+
+	DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
+		      __func__));
+	if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
+		return QLA_ERROR;
+	if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
+		ql4xxx_unlock_flash(ha);
+		return QLA_ERROR;
+	}
+
+	/* Get EEPRom Parameters from NVRAM and validate */
+	ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n");
+	if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		extHwConfig.Asuint32_t =
+			rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	} else {
+		ql4_printk(KERN_WARNING, ha,
+		    "scsi%ld: %s: EEProm checksum invalid.  "
+		    "Please update your EEPROM\n", ha->host_no,
+		    __func__);
+
+		/* Attempt to set defaults */
+		if (is_qla4010(ha))
+			extHwConfig.Asuint32_t = 0x1912;
+		else if (is_qla4022(ha) || is_qla4032(ha))
+			extHwConfig.Asuint32_t = 0x0023;
+		else
+			return QLA_ERROR;
+	}
+
+	if (is_qla4022(ha) || is_qla4032(ha))
+		qla4xxx_set_model_info(ha);
+	else
+		strcpy(ha->model_name, "QLA4010");
+
+	DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
+		     ha->host_no, __func__, extHwConfig.Asuint32_t));
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha));
+	readl(isp_ext_hw_conf(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ql4xxx_unlock_nvram(ha);
+	ql4xxx_unlock_flash(ha);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @ha: HA context
+ */
+void qla4_8xxx_pci_config(struct scsi_qla_host *ha)
+{
+	pci_set_master(ha->pdev);
+}
+
+void qla4xxx_pci_config(struct scsi_qla_host *ha)
+{
+	uint16_t w;
+	int status;
+
+	ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
+
+	pci_set_master(ha->pdev);
+	status = pci_set_mwi(ha->pdev);
+	/*
+	 * We want to respect framework's setting of PCI configuration space
+	 * command register and also want to make sure that all bits of
+	 * interest to us are properly set in command register.
+	 */
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+}
+
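+/**
+ * qla4xxx_start_firmware_from_flash - boot firmware from flash ROM
+ * @ha: pointer to host adapter structure.
+ *
+ * Seeds mailbox 7, sets BOOT_ENABLE, and waits up to FIRMWARE_UP_TOV
+ * seconds for the boot firmware to report completion in mailbox 0.
+ **/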
+static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
+{
+	int status = QLA_ERROR;
+	unsigned long max_wait_time;
+	unsigned long flags;
+	uint32_t mbox_status;
+
+	ql4_printk(KERN_INFO, ha, "Starting firmware ...\n");
+
+	/*
+	 * Start firmware from flash ROM
+	 *
+	 * WORKAROUND: Stuff a non-constant value that the firmware can
+	 * use as a seed for a random number generator in MB7 prior to
+	 * setting BOOT_ENABLE.	 Fixes problem where the TCP
+	 * connections use the same TCP ports after each reboot,
+	 * causing some connections to not get re-established.
+	 */
+	DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n",
+		     ha->host_no, __func__));
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel(jiffies, &ha->reg->mailbox[7]);
+	if (is_qla4022(ha) || is_qla4032(ha))
+		writel(set_rmask(NVR_WRITE_ENABLE),
+		       &ha->reg->u1.isp4022.nvram);
+
+	writel(2, &ha->reg->mailbox[6]);
+	readl(&ha->reg->mailbox[6]);
+
+	writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
+	readl(&ha->reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Wait for firmware to come UP. */
+	DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for "
+		      "boot firmware to complete...\n",
+		      ha->host_no, __func__, FIRMWARE_UP_TOV));
+	max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ);
+	do {
+		uint32_t ctrl_status;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		ctrl_status = readw(&ha->reg->ctrl_status);
+		mbox_status = readw(&ha->reg->mailbox[0]);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR))
+			break;
+		if (mbox_status == MBOX_STS_COMMAND_COMPLETE)
+			break;
+
+		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
+		    "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
+		    ha->host_no, __func__, ctrl_status, max_wait_time));
+
+		msleep_interruptible(250);
+	} while (!time_after_eq(jiffies, max_wait_time));
+
+	if (mbox_status == MBOX_STS_COMMAND_COMPLETE) {
+		DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n",
+			     ha->host_no, __func__));
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+		       &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		status = QLA_SUCCESS;
+	} else {
+		printk(KERN_INFO "scsi%ld: %s: Boot firmware failed "
+		       "-  mbox status 0x%x\n", ha->host_no, __func__,
+		       mbox_status);
+		status = QLA_ERROR;
+	}
+	return status;
+}
+
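+/**
+ * ql4xxx_lock_drvr_wait - acquire the global init semaphore
+ * @a: pointer to host adapter structure.
+ *
+ * Retries once a second for up to QL4_LOCK_DRVR_WAIT seconds.  Returns
+ * QLA_SUCCESS when the semaphore is acquired, QLA_ERROR on timeout.
+ **/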
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
+{
+#define QL4_LOCK_DRVR_WAIT	60
+#define QL4_LOCK_DRVR_SLEEP	1
+
+	int drvr_wait = QL4_LOCK_DRVR_WAIT;
+	while (drvr_wait) {
+		if (ql4xxx_lock_drvr(a) == 0) {
+			ssleep(QL4_LOCK_DRVR_SLEEP);
+			if (drvr_wait) {
+				DEBUG2(printk("scsi%ld: %s: Waiting for "
+					      "Global Init Semaphore(%d)...\n",
+					      a->host_no,
+					      __func__, drvr_wait));
+			}
+			drvr_wait -= QL4_LOCK_DRVR_SLEEP;
+		} else {
+			DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
+				      "acquired\n", a->host_no, __func__));
+			return QLA_SUCCESS;
+		}
+	}
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_start_firmware - starts qla4xxx firmware
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine performs the necessary steps to start the firmware for
+ * the QLA4010 adapter.
+ **/
+int qla4xxx_start_firmware(struct scsi_qla_host *ha)
+{
+	unsigned long flags = 0;
+	uint32_t mbox_status;
+	int status = QLA_ERROR;
+	int soft_reset = 1;
+	int config_chip = 0;
+
+	if (is_qla4022(ha) || is_qla4032(ha))
+		ql4xxx_set_mac_number(ha);
+
+	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+		return QLA_ERROR;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	DEBUG2(printk("scsi%ld: %s: port_ctrl	= 0x%08X\n", ha->host_no,
+		      __func__, readw(isp_port_ctrl(ha))));
+	DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no,
+		     __func__, readw(isp_port_status(ha))));
+
+	/* Is Hardware already initialized? */
+	if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) {
+		DEBUG(printk("scsi%ld: %s: Hardware has already been "
+			     "initialized\n", ha->host_no, __func__));
+
+		/* Receive firmware boot acknowledgement */
+		mbox_status = readw(&ha->reg->mailbox[0]);
+
+		DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= "
+			      "0x%x\n", ha->host_no, __func__, mbox_status));
+
+		/* Is firmware already booted? */
+		if (mbox_status == 0) {
+			/* F/W not running, must be config by net driver */
+			config_chip = 1;
+			soft_reset = 0;
+		} else {
+			writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+			writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
+				DEBUG2(printk("scsi%ld: %s: Get firmware "
+					      "state -- state = 0x%x\n",
+					      ha->host_no,
+					      __func__, ha->firmware_state));
+				/* F/W is running */
+				if (ha->firmware_state &
+				    FW_STATE_CONFIG_WAIT) {
+					DEBUG2(printk("scsi%ld: %s: Firmware "
+						      "in known state -- "
+						      "config and "
+						      "boot, state = 0x%x\n",
+						      ha->host_no, __func__,
+						      ha->firmware_state));
+					config_chip = 1;
+					soft_reset = 0;
+				}
+			} else {
+				DEBUG2(printk("scsi%ld: %s: Firmware in "
+					      "unknown state -- resetting,"
+					      " state = "
+					      "0x%x\n", ha->host_no, __func__,
+					      ha->firmware_state));
+			}
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+		}
+	} else {
+		DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been "
+			     "started - resetting\n", ha->host_no, __func__));
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ",
+		     ha->host_no, __func__, soft_reset, config_chip));
+	if (soft_reset) {
+		DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no,
+			     __func__));
+		status = qla4xxx_soft_reset(ha);	/* NOTE: acquires drvr
+							 * lock again, but ok */
+		if (status == QLA_ERROR) {
+			DEBUG(printk("scsi%d: %s: Soft Reset failed!\n",
+				     ha->host_no, __func__));
+			ql4xxx_unlock_drvr(ha);
+			return QLA_ERROR;
+		}
+		config_chip = 1;
+
+		/* Reset clears the semaphore, so acquire again */
+		if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+			return QLA_ERROR;
+	}
+
+	if (config_chip) {
+		status = qla4xxx_config_nvram(ha);
+		if (status == QLA_SUCCESS)
+			status = qla4xxx_start_firmware_from_flash(ha);
+	}
+
+	ql4xxx_unlock_drvr(ha);
+	if (status == QLA_SUCCESS) {
+		if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
+			qla4xxx_get_crash_record(ha);
+
+		qla4xxx_init_rings(ha);
+	} else {
+		DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
+			     ha->host_no, __func__));
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_free_ddb_index - Free DDBs reserved by firmware
+ * @ha: pointer to adapter structure
+ *
+ * Since firmware is not running in autoconnect mode, the DDB indices should
+ * be freed so that when login happens from user space there are free DDB
+ * indices available.
+ **/
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+{
+	int max_ddbs;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+					      &next_idx, &state, &conn_err,
+						NULL, NULL);
+		if (ret == QLA_ERROR) {
+			next_idx++;
+			continue;
+		}
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Freeing DDB index = 0x%x\n", idx));
+			ret = qla4xxx_clear_ddb_entry(ha, idx);
+			if (ret == QLA_ERROR)
+				ql4_printk(KERN_ERR, ha,
+					   "Unable to clear DDB index = "
+					   "0x%x\n", idx);
+		}
+		if (next_idx == 0)
+			break;
+	}
+}
+
+/**
+ * qla4xxx_initialize_adapter - initializes HBA
+ * @ha: Pointer to host adapter structure.
+ * @is_reset: Indicates whether this is an adapter reset (RESET_ADAPTER)
+ *	      or an initial bring-up.
+ *
+ * This routine performs all of the steps necessary to initialize the adapter.
+ *
+ **/
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
+{
+	int status = QLA_ERROR;
+
+	ha->eeprom_cmd_data = 0;
+
+	ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n");
+	ha->isp_ops->pci_config(ha);
+
+	ha->isp_ops->disable_intrs(ha);
+
+	/* Initialize the Host adapter request/response queues and firmware */
+	if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
+		goto exit_init_hba;
+
+	/*
+	 * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
+	 * Mailbox interrupts must be enabled prior to issuing any mailbox
+	 * command in order to prevent the possibility of losing interrupts
+	 * while switching from polling to interrupt mode. IOCB interrupts are
+	 * enabled via isp_ops->enable_intrs.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		qla4_83xx_enable_mbox_intrs(ha);
+
+	if (qla4xxx_about_firmware(ha) == QLA_ERROR)
+		goto exit_init_hba;
+
+	if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
+		goto exit_init_hba;
+
+	qla4xxx_init_local_data(ha);
+
+	status = qla4xxx_init_firmware(ha);
+	if (status == QLA_ERROR)
+		goto exit_init_hba;
+
+	if (is_reset == RESET_ADAPTER)
+		qla4xxx_build_ddb_list(ha, is_reset);
+
+	set_bit(AF_ONLINE, &ha->flags);
+
+exit_init_hba:
+	DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
+	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
+	return status;
+}
+
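+/**
+ * qla4xxx_ddb_change - handle a firmware DDB state transition
+ * @ha: pointer to host adapter structure.
+ * @fw_ddb_index: firmware DDB index
+ * @ddb_entry: pointer to device database entry
+ * @state: new firmware DDB device state
+ *
+ * Propagates the old-state/new-state transition to the iSCSI transport
+ * (unblock session, login event, or session failure).
+ **/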
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+		       struct ddb_entry *ddb_entry, uint32_t state)
+{
+	uint32_t old_fw_ddb_device_state;
+	int status = QLA_ERROR;
+
+	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: DDB - old state = 0x%x, new state = 0x%x for "
+			  "index [%d]\n", __func__,
+			  ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+	ddb_entry->fw_ddb_device_state = state;
+
+	switch (old_fw_ddb_device_state) {
+	case DDB_DS_LOGIN_IN_PROCESS:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+		case DDB_DS_DISCOVERY:
+			qla4xxx_update_session_conn_param(ha, ddb_entry);
+			ddb_entry->unblock_sess(ddb_entry->sess);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+		case DDB_DS_NO_CONNECTION_ACTIVE:
+			iscsi_conn_login_event(ddb_entry->conn,
+					       ISCSI_CONN_STATE_FREE);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_ACTIVE:
+	case DDB_DS_DISCOVERY:
+		switch (state) {
+		case DDB_DS_SESSION_FAILED:
+			/*
+			 * iscsi_session failure  will cause userspace to
+			 * stop the connection which in turn would block the
+			 * iscsi_session and start relogin
+			 */
+			iscsi_session_failure(ddb_entry->sess->dd_data,
+					      ISCSI_ERR_CONN_FAILED);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_NO_CONNECTION_ACTIVE:
+			clear_bit(fw_ddb_index, ha->ddb_idx_map);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_FAILED:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+		case DDB_DS_DISCOVERY:
+			ddb_entry->unblock_sess(ddb_entry->sess);
+			qla4xxx_update_session_conn_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+			iscsi_session_failure(ddb_entry->sess->dd_data,
+					      ISCSI_ERR_CONN_FAILED);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	default:
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+				__func__));
+		break;
+	}
+	return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+	/*
+	 * This triggers a relogin.  After the relogin_timer
+	 * expires, the relogin gets scheduled.  We must wait a
+	 * minimum amount of time since receiving an 0x8014 AEN
+	 * with failed device_state or a logout response before
+	 * we can issue another relogin.
+	 *
+	 * Firmware pads this timeout: (time2wait + 1).  The driver's
+	 * relogin retry interval must be longer than the firmware's;
+	 * otherwise the firmware will fail the set_ddb() mailbox
+	 * command with status 0x4005 while it is still counting down
+	 * its time2wait.
+	 */
+	atomic_set(&ddb_entry->relogin_timer, 0);
+	atomic_set(&ddb_entry->retry_relogin_timer,
+		   ddb_entry->default_time2wait + 4);
+}
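+
+/*
+ * Worked example for the padding above (illustrative only, not driver
+ * code): if the firmware reports time2wait = 2 for a DDB, it counts
+ * down (time2wait + 1) = 3 before accepting another login.  Arming the
+ * driver retry at default_time2wait + 4 = 6 keeps the driver's relogin
+ * attempt safely outside that window, so set_ddb() is not failed with
+ * status 0x4005.
+ */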
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			     struct ddb_entry *ddb_entry, uint32_t state)
+{
+	uint32_t old_fw_ddb_device_state;
+	int status = QLA_ERROR;
+
+	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: DDB - old state = 0x%x, new state = 0x%x for "
+			  "index [%d]\n", __func__,
+			  ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+	ddb_entry->fw_ddb_device_state = state;
+
+	switch (old_fw_ddb_device_state) {
+	case DDB_DS_LOGIN_IN_PROCESS:
+	case DDB_DS_NO_CONNECTION_ACTIVE:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+			ddb_entry->unblock_sess(ddb_entry->sess);
+			qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+			iscsi_block_session(ddb_entry->sess);
+			if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+				qla4xxx_arm_relogin_timer(ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_ACTIVE:
+		switch (state) {
+		case DDB_DS_SESSION_FAILED:
+			iscsi_block_session(ddb_entry->sess);
+			if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+				qla4xxx_arm_relogin_timer(ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_FAILED:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+			ddb_entry->unblock_sess(ddb_entry->sess);
+			qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+			if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+				qla4xxx_arm_relogin_timer(ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	default:
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+				  __func__));
+		break;
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @state: Device state
+ * @conn_err: Connection error code reported by the firmware
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+				uint32_t fw_ddb_index,
+				uint32_t state, uint32_t conn_err)
+{
+	struct ddb_entry *ddb_entry;
+	int status = QLA_ERROR;
+
+	/* check for out of range index */
+	if (fw_ddb_index >= MAX_DDB_ENTRIES)
+		goto exit_ddb_event;
+
+	/* Get the corresponding ddb entry */
+	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+	/* Device does not currently exist in our database. */
+	if (ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+			   __func__, fw_ddb_index);
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+			clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+		goto exit_ddb_event;
+	}
+
+	ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
+
+exit_ddb_event:
+	return status;
+}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues the set ddb and conn open mailbox commands.
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_dma;
+	uint32_t mbx_sts = 0;
+	int ret;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (!test_bit(AF_LINK_UP, &ha->flags))
+		return;
+
+	if (ddb_entry->ddb_type != FLASH_DDB) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Skipping login to non FLASH DB"));
+		goto exit_login;
+	}
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_login;
+	}
+
+	if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+		ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+		if (ret == QLA_ERROR)
+			goto exit_login;
+
+		ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+		ha->tot_ddbs++;
+	}
+
+	memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+	       sizeof(struct dev_db_entry));
+	ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+	ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+				    fw_ddb_dma, &mbx_sts);
+	if (ret == QLA_ERROR) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+		goto exit_login;
+	}
+
+	ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+	ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+	if (ret == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+			   sess->targetname);
+		goto exit_login;
+	}
+
+exit_login:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_inline.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_inline.h
new file mode 100644
index 0000000..655b7bb
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_inline.h
@@ -0,0 +1,96 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+/*
+ *
+ * qla4xxx_lookup_ddb_by_fw_index
+ *	This routine locates a device handle given the firmware device
+ *	database index.  If the device doesn't exist, returns NULL.
+ *
+ * Input:
+ *	ha - Pointer to host adapter structure.
+ *	fw_ddb_index - Firmware's device database index
+ *
+ * Returns:
+ *	Pointer to the corresponding internal device database structure
+ */
+static inline struct ddb_entry *
+qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
+{
+	struct ddb_entry *ddb_entry = NULL;
+
+	if ((fw_ddb_index < MAX_DDB_ENTRIES) &&
+	    (ha->fw_ddb_index_map[fw_ddb_index] !=
+		(struct ddb_entry *) INVALID_ENTRY)) {
+		ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
+	}
+
+	DEBUG3(printk("scsi%d: %s: ddb [%d], ddb_entry = %p\n",
+	    ha->host_no, __func__, fw_ddb_index, ddb_entry));
+
+	return ddb_entry;
+}
+
+static inline void
+__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
+{
+	if (is_qla4022(ha) || is_qla4032(ha)) {
+		writel(set_rmask(IMR_SCSI_INTR_ENABLE),
+		       &ha->reg->u1.isp4022.intr_mask);
+		readl(&ha->reg->u1.isp4022.intr_mask);
+	} else {
+		writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+	set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+static inline void
+__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
+{
+	if (is_qla4022(ha) || is_qla4032(ha)) {
+		writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
+		       &ha->reg->u1.isp4022.intr_mask);
+		readl(&ha->reg->u1.isp4022.intr_mask);
+	} else {
+		writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+	clear_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+static inline void
+qla4xxx_enable_intrs(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	__qla4xxx_enable_intrs(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline void
+qla4xxx_disable_intrs(struct scsi_qla_host *ha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	__qla4xxx_disable_intrs(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
+{
+	int type;
+
+	if (chap_entry->flags & BIT_7)
+		type = LOCAL_CHAP;
+	else
+		type = BIDI_CHAP;
+
+	return type;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_iocb.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_iocb.c
new file mode 100644
index 0000000..17222eb
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -0,0 +1,542 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+#include <scsi/scsi_tcq.h>
+
+static int
+qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
+{
+	uint16_t cnt;
+
+	/* Calculate number of free request entries. */
+	if ((req_cnt + 2) >= ha->req_q_count) {
+		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
+		if (ha->request_in < cnt)
+			ha->req_q_count = cnt - ha->request_in;
+		else
+			ha->req_q_count = REQUEST_QUEUE_DEPTH -
+						(ha->request_in - cnt);
+	}
+
+	/* Check if room for request in request ring. */
+	if ((req_cnt + 2) < ha->req_q_count)
+		return 1;
+	else
+		return 0;
+}
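+
+/*
+ * Worked example (illustrative only; REQUEST_QUEUE_DEPTH assumed to be
+ * 128 here): with request_in = 10 and a shadow out-pointer of 120, the
+ * producer has not passed the consumer, so req_q_count = 120 - 10 =
+ * 110 free entries.  A request needing req_cnt = 3 entries fits since
+ * (3 + 2) < 110; the two-entry pad keeps some headroom so the producer
+ * index never fully catches up with the consumer index.
+ */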
+
+static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
+{
+	/* Advance request queue pointer */
+	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
+		ha->request_in = 0;
+		ha->request_ptr = ha->request_ring;
+	} else {
+		ha->request_in++;
+		ha->request_ptr++;
+	}
+}
+
+/**
+ * qla4xxx_get_req_pkt - returns a valid entry in request queue.
+ * @ha: Pointer to host adapter structure.
+ * @queue_entry: Pointer to pointer to queue entry structure
+ *
+ * This routine performs the following tasks:
+ *	- returns the current request_in pointer (if queue not full)
+ *	- advances the request_in pointer
+ *	- checks for queue full
+ **/
+static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
+			       struct queue_entry **queue_entry)
+{
+	uint16_t req_cnt = 1;
+
+	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
+		*queue_entry = ha->request_ptr;
+		memset(*queue_entry, 0, sizeof(**queue_entry));
+
+		qla4xxx_advance_req_ring_ptr(ha);
+		ha->req_q_count -= req_cnt;
+		return QLA_SUCCESS;
+	}
+
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_send_marker_iocb - issues marker iocb to HBA
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: SCSI LUN
+ * @mrkr_mod: marker modifier
+ *
+ * This routine issues a marker IOCB.
+ **/
+int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
+	struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod)
+{
+	struct qla4_marker_entry *marker_entry;
+	unsigned long flags = 0;
+	uint8_t status = QLA_SUCCESS;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Get pointer to the queue entry for the marker */
+	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
+	    QLA_SUCCESS) {
+		status = QLA_ERROR;
+		goto exit_send_marker;
+	}
+
+	/* Put the marker in the request queue */
+	marker_entry->hdr.entryType = ET_MARKER;
+	marker_entry->hdr.entryCount = 1;
+	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+	marker_entry->modifier = cpu_to_le16(mrkr_mod);
+	int_to_scsilun(lun, &marker_entry->lun);
+	wmb();
+
+	/* Tell ISP it's got a new I/O request */
+	ha->isp_ops->queue_iocb(ha);
+
+exit_send_marker:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return status;
+}
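+
+/*
+ * Usage sketch (illustrative only): the driver sends a marker after a
+ * reset before issuing new commands to the device, e.g.:
+ *
+ *	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+ *				     MM_LUN_RESET) != QLA_SUCCESS)
+ *		return FAILED;
+ *
+ * MM_LUN_RESET is the modifier used for LUN-level resets elsewhere in
+ * this driver; target-level warm resets use MM_TGT_WARM_RESET.
+ */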
+
+static struct continuation_t1_entry *
+qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
+{
+	struct continuation_t1_entry *cont_entry;
+
+	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
+
+	qla4xxx_advance_req_ring_ptr(ha);
+
+	/* Load packet defaults */
+	cont_entry->hdr.entryType = ET_CONTINUE;
+	cont_entry->hdr.entryCount = 1;
+	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
+
+	return cont_entry;
+}
+
+static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > COMMAND_SEG) {
+		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
+		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
+			iocbs++;
+	}
+	return iocbs;
+}
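+
+/*
+ * Worked example (illustrative only; COMMAND_SEG = 4 and CONTINUE_SEG
+ * = 5 are assumed values, used here just for the arithmetic): a command
+ * with 12 data segments needs the base IOCB for the first 4 segments
+ * plus continuations for the remaining 8: 8 / 5 = 1 full continuation,
+ * and 8 % 5 = 3 leftover segments add one more, so
+ * qla4xxx_calc_request_entries(12) returns 1 + 1 + 1 = 3.
+ */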
+
+static void qla4xxx_build_scsi_iocbs(struct srb *srb,
+				     struct command_t3_entry *cmd_entry,
+				     uint16_t tot_dsds)
+{
+	struct scsi_qla_host *ha;
+	uint16_t avail_dsds;
+	struct data_seg_a64 *cur_dsd;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
+
+	cmd = srb->cmd;
+	ha = srb->ha;
+
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		/* No data being transferred */
+		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
+		return;
+	}
+
+	avail_dsds = COMMAND_SEG;
+	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			struct continuation_t1_entry *cont_entry;
+
+			cont_entry = qla4xxx_alloc_cont_entry(ha);
+			cur_dsd =
+				(struct data_seg_a64 *)
+				&cont_entry->dataseg[0];
+			avail_dsds = CONTINUE_SEG;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+
+		cur_dsd++;
+	}
+}
+
+void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
+{
+	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
+	readl(&ha->qla4_83xx_reg->req_q_in);
+}
+
+void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
+{
+	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
+	readl(&ha->qla4_83xx_reg->rsp_q_out);
+}
+
+/**
+ * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine notifies the ISP that one or more new request
+ * queue entries have been placed on the request queue.
+ **/
+void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
+{
+	uint32_t dbval = 0;
+
+	dbval = 0x14 | (ha->func_num << 5);
+	dbval = dbval | (0 << 8) | (ha->request_in << 16);
+
+	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
+}
+
+/**
+ * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine notifies the ISP that one or more response/completion
+ * queue entries have been processed by the driver.
+ * This also clears the interrupt.
+ **/
+void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
+{
+	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
+	readl(&ha->qla4_82xx_reg->rsp_q_out);
+}
+
+/**
+ * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine notifies the ISP that one or more new request
+ * queue entries have been placed on the request queue.
+ **/
+void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
+{
+	writel(ha->request_in, &ha->reg->req_q_in);
+	readl(&ha->reg->req_q_in);
+}
+
+/**
+ * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
+ * @ha: pointer to host adapter structure.
+ *
+ * This routine notifies the ISP that one or more response/completion
+ * queue entries have been processed by the driver.
+ * This also clears the interrupt.
+ **/
+void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
+{
+	writel(ha->response_out, &ha->reg->rsp_q_out);
+	readl(&ha->reg->rsp_q_out);
+}
+
+/**
+ * qla4xxx_send_command_to_isp - issues command to HBA
+ * @ha: pointer to host adapter structure.
+ * @srb: pointer to SCSI Request Block to be sent to ISP
+ *
+ * This routine is called by qla4xxx_queuecommand to build an ISP
+ * command and pass it to the ISP for execution.
+ **/
+int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
+{
+	struct scsi_cmnd *cmd = srb->cmd;
+	struct ddb_entry *ddb_entry;
+	struct command_t3_entry *cmd_entry;
+	int nseg;
+	uint16_t tot_dsds;
+	uint16_t req_cnt;
+	unsigned long flags;
+	uint32_t index;
+
+	/* Get real lun and adapter */
+	ddb_entry = srb->ddb;
+
+	tot_dsds = 0;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	index = (uint32_t)cmd->request->tag;
+
+	/*
+	 * Check to see if adapter is online before placing request on
+	 * request queue.  If a reset occurs and a request is in the queue,
+	 * the firmware will still attempt to process the request, retrieving
+	 * garbage for pointers.
+	 */
+	if (!test_bit(AF_ONLINE, &ha->flags)) {
+		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
+			      "Do not issue command.\n",
+			      ha->host_no, __func__));
+		goto queuing_error;
+	}
+
+	/* Calculate the number of request entries needed. */
+	nseg = scsi_dma_map(cmd);
+	if (nseg < 0)
+		goto queuing_error;
+	tot_dsds = nseg;
+
+	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
+	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
+		goto queuing_error;
+
+	/* total iocbs active */
+	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
+		goto queuing_error;
+
+	/* Build command packet */
+	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
+	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
+	cmd_entry->hdr.entryType = ET_COMMAND;
+	cmd_entry->handle = cpu_to_le32(index);
+	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+
+	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
+	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
+	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
+	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
+	cmd_entry->hdr.entryCount = req_cnt;
+
+	/* Set data transfer direction control flags
+	 * NOTE: Look at data_direction bits only if there is data to be
+	 *	 transferred, as the data direction bit is sometimes set
+	 *	 even when there is no data to be transferred */
+	cmd_entry->control_flags = CF_NO_DATA;
+	if (scsi_bufflen(cmd)) {
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			cmd_entry->control_flags = CF_WRITE;
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			cmd_entry->control_flags = CF_READ;
+
+		ha->bytes_xfered += scsi_bufflen(cmd);
+		if (ha->bytes_xfered & ~0xFFFFF) {
+			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
+			ha->bytes_xfered &= 0xFFFFF;
+		}
+	}
+
+	/* Set tagged queueing control flags */
+	cmd_entry->control_flags |= CF_SIMPLE_TAG;
+
+	qla4xxx_advance_req_ring_ptr(ha);
+	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
+	wmb();
+
+	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;
+
+	/* update counters */
+	srb->state = SRB_ACTIVE_STATE;
+	srb->flags |= SRB_DMA_VALID;
+
+	/* Track IOCB used */
+	ha->iocb_cnt += req_cnt;
+	srb->iocb_cnt = req_cnt;
+	ha->req_q_count -= req_cnt;
+
+	ha->isp_ops->queue_iocb(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_ERROR;
+}
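+
+/*
+ * Note on the transfer accounting above (illustrative example only):
+ * bytes_xfered accumulates in the low 20 bits (0xFFFFF = 1 MB - 1).
+ * If the running total reaches, say, 0x140000, the whole megabytes
+ * (0x140000 >> 20 = 1) are moved into total_mbytes_xferred and the
+ * remainder (0x140000 & 0xFFFFF = 0x40000) stays in bytes_xfered.
+ */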
+
+int qla4xxx_send_passthru0(struct iscsi_task *task)
+{
+	struct passthru0 *passthru_iocb;
+	struct iscsi_session *sess = task->conn->session;
+	struct ddb_entry *ddb_entry = sess->dd_data;
+	struct scsi_qla_host *ha = ddb_entry->ha;
+	struct ql4_task_data *task_data = task->dd_data;
+	uint16_t ctrl_flags = 0;
+	unsigned long flags;
+	int ret = QLA_ERROR;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	task_data->iocb_req_cnt = 1;
+	/* Put the IOCB on the request queue */
+	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
+		goto queuing_error;
+
+	passthru_iocb = (struct passthru0 *) ha->request_ptr;
+
+	memset(passthru_iocb, 0, sizeof(struct passthru0));
+	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
+	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
+	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
+	passthru_iocb->handle = task->itt;
+	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);
+
+	/* Setup the out & in DSDs */
+	if (task_data->req_len) {
+		memcpy((uint8_t *)task_data->req_buffer +
+		       sizeof(struct iscsi_hdr), task->data, task->data_count);
+		ctrl_flags |= PT_FLAG_SEND_BUFFER;
+		passthru_iocb->out_dsd.base.addrLow =
+					cpu_to_le32(LSDW(task_data->req_dma));
+		passthru_iocb->out_dsd.base.addrHigh =
+					cpu_to_le32(MSDW(task_data->req_dma));
+		passthru_iocb->out_dsd.count =
+					cpu_to_le32(task->data_count +
+						    sizeof(struct iscsi_hdr));
+	}
+	if (task_data->resp_len) {
+		passthru_iocb->in_dsd.base.addrLow =
+					cpu_to_le32(LSDW(task_data->resp_dma));
+		passthru_iocb->in_dsd.base.addrHigh =
+					cpu_to_le32(MSDW(task_data->resp_dma));
+		passthru_iocb->in_dsd.count =
+			cpu_to_le32(task_data->resp_len);
+	}
+
+	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
+	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);
+
+	/* Update the request pointer */
+	qla4xxx_advance_req_ring_ptr(ha);
+	wmb();
+
+	/* Track IOCB used */
+	ha->iocb_cnt += task_data->iocb_req_cnt;
+	ha->req_q_count -= task_data->iocb_req_cnt;
+	ha->isp_ops->queue_iocb(ha);
+	ret = QLA_SUCCESS;
+
+queuing_error:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return ret;
+}
+
+static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
+{
+	struct mrb *mrb;
+
+	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
+	if (!mrb)
+		return mrb;
+
+	mrb->ha = ha;
+	return mrb;
+}
+
+static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
+				  uint32_t *in_mbox)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t i;
+	unsigned long flags;
+	uint32_t index = 0;
+
+	/* Acquire hardware specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Get pointer to the queue entry for the marker */
+	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
+	if (rval != QLA_SUCCESS)
+		goto exit_mbox_iocb;
+
+	index = ha->mrb_index;
+	/* get valid mrb index*/
+	for (i = 0; i < MAX_MRB; i++) {
+		index++;
+		if (index == MAX_MRB)
+			index = 1;
+		if (ha->active_mrb_array[index] == NULL) {
+			ha->mrb_index = index;
+			break;
+		}
+	}
+
+	mrb->iocb_cnt = 1;
+	ha->active_mrb_array[index] = mrb;
+	mrb->mbox->handle = index;
+	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
+	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
+	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
+	mrb->mbox_cmd = in_mbox[0];
+	wmb();
+
+	ha->iocb_cnt += mrb->iocb_cnt;
+	ha->isp_ops->queue_iocb(ha);
+exit_mbox_iocb:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return rval;
+}
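+
+/*
+ * Note on the MRB index search above (illustrative): slots wrap within
+ * [1, MAX_MRB - 1] and index 0 is never handed out, presumably so a
+ * zero handle in a mailbox status IOCB cannot match a live MRB.  For
+ * example, with mrb_index = MAX_MRB - 1 the first probe wraps straight
+ * to slot 1.
+ */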
+
+int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
+		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
+{
+	uint32_t in_mbox[8];
+	struct mrb *mrb = NULL;
+	int rval = QLA_SUCCESS;
+
+	memset(in_mbox, 0, sizeof(in_mbox));
+
+	mrb = qla4xxx_get_new_mrb(ha);
+	if (!mrb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
+				  __func__));
+		rval = QLA_ERROR;
+		goto exit_ping;
+	}
+
+	in_mbox[0] = MBOX_CMD_PING;
+	in_mbox[1] = options;
+	memcpy(&in_mbox[2], &ipaddr[0], 4);
+	memcpy(&in_mbox[3], &ipaddr[4], 4);
+	memcpy(&in_mbox[4], &ipaddr[8], 4);
+	memcpy(&in_mbox[5], &ipaddr[12], 4);
+	in_mbox[6] = payload_size;
+
+	mrb->pid = pid;
+	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
+
+	if (rval != QLA_SUCCESS)
+		goto exit_ping;
+
+	return rval;
+exit_ping:
+	kfree(mrb);
+	return rval;
+}
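+
+/*
+ * Mailbox layout sketch for MBOX_CMD_PING as built above (illustrative
+ * only): in_mbox[0] = opcode, in_mbox[1] = options, in_mbox[2..5] hold
+ * the 16-byte address buffer (an IPv4 address would occupy in_mbox[2]
+ * with the rest zero-filled), and in_mbox[6] = payload size.  The
+ * result arrives later as an ET_MBOX_STATUS IOCB and is routed to
+ * qla4xxx_mbox_status_entry() in ql4_isr.c.
+ */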
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_isr.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_isr.c
new file mode 100644
index 0000000..d2cd33d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_isr.c
@@ -0,0 +1,1622 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+/**
+ * qla4xxx_copy_sense - copy sense data into cmd sense buffer
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ * @srb: Pointer to srb structure.
+ **/
+static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
+                               struct status_entry *sts_entry,
+                               struct srb *srb)
+{
+	struct scsi_cmnd *cmd = srb->cmd;
+	uint16_t sense_len;
+
+	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
+	if (sense_len == 0) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:"
+				  " sense len 0\n", ha->host_no,
+				  cmd->device->channel, cmd->device->id,
+				  cmd->device->lun, __func__));
+		ha->status_srb = NULL;
+		return;
+	}
+	/* Save total available sense length,
+	 * not to exceed cmd's sense buffer size */
+	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
+	srb->req_sense_ptr = cmd->sense_buffer;
+	srb->req_sense_len = sense_len;
+
+	/* Copy sense from sts_entry pkt */
+	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
+	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
+
+	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, "
+		"ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
+		cmd->device->channel, cmd->device->id,
+		cmd->device->lun, __func__,
+		sts_entry->senseData[2] & 0x0f,
+		sts_entry->senseData[7],
+		sts_entry->senseData[12],
+		sts_entry->senseData[13]));
+
+	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
+	srb->flags |= SRB_GOT_SENSE;
+
+	/* Update srb, in case a sts_cont pkt follows */
+	srb->req_sense_ptr += sense_len;
+	srb->req_sense_len -= sense_len;
+	if (srb->req_sense_len != 0)
+		ha->status_srb = srb;
+	else
+		ha->status_srb = NULL;
+}
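+
+/*
+ * Worked example (illustrative only; IOCB_MAX_SENSEDATA_LEN assumed to
+ * be 32): if the target returns 40 bytes of sense, the status entry
+ * carries the first 32 bytes and ha->status_srb is left pointing at
+ * the srb with req_sense_len = 8, so the following
+ * ET_STATUS_CONTINUATION entry (see qla4xxx_status_cont_entry below)
+ * supplies the remaining 8 bytes before the command is completed.
+ */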
+
+/**
+ * qla4xxx_status_cont_entry - Process a Status Continuation entry.
+ * @ha: SCSI driver HA context
+ * @sts_cont: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
+			  struct status_cont_entry *sts_cont)
+{
+	struct srb *srb = ha->status_srb;
+	struct scsi_cmnd *cmd;
+	uint16_t sense_len;
+
+	if (srb == NULL)
+		return;
+
+	cmd = srb->cmd;
+	if (cmd == NULL) {
+		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
+			"back to OS srb=%p srb->state:%d\n", ha->host_no,
+			__func__, srb, srb->state));
+		ha->status_srb = NULL;
+		return;
+	}
+
+	/* Copy sense data. */
+	sense_len = min_t(uint16_t, srb->req_sense_len,
+			  IOCB_MAX_EXT_SENSEDATA_LEN);
+	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
+	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));
+
+	srb->req_sense_ptr += sense_len;
+	srb->req_sense_len -= sense_len;
+
+	/* Place command on done queue. */
+	if (srb->req_sense_len == 0) {
+		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+		ha->status_srb = NULL;
+	}
+}
+
+/**
+ * qla4xxx_status_entry - processes status IOCBs
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_status_entry(struct scsi_qla_host *ha,
+				 struct status_entry *sts_entry)
+{
+	uint8_t scsi_status;
+	struct scsi_cmnd *cmd;
+	struct srb *srb;
+	struct ddb_entry *ddb_entry;
+	uint32_t residual;
+
+	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
+	if (!srb) {
+		ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+			   "handle=0x%x, srb=%p\n", __func__,
+			   sts_entry->handle, srb);
+		if (is_qla80XX(ha))
+			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+		else
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		return;
+	}
+
+	cmd = srb->cmd;
+	if (cmd == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
+			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
+			      ha->host_no, __func__, sts_entry->handle,
+			      srb, srb->state));
+		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
+		    " already returned to OS (srb=%p)\n", srb);
+		return;
+	}
+
+	ddb_entry = srb->ddb;
+	if (ddb_entry == NULL) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto status_entry_exit;
+	}
+
+	residual = le32_to_cpu(sts_entry->residualByteCnt);
+
+	/* Translate ISP error to a Linux SCSI error. */
+	scsi_status = sts_entry->scsiStatus;
+	switch (sts_entry->completionStatus) {
+	case SCS_COMPLETE:
+
+		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+			cmd->result = DID_ERROR << 16;
+			break;
+		}
+
+		if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
+			scsi_set_resid(cmd, residual);
+			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
+				cmd->underflow)) {
+
+				cmd->result = DID_ERROR << 16;
+
+				DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: "
+					"Mid-layer Data underrun0, "
+					"xferlen = 0x%x, "
+					"residual = 0x%x\n", ha->host_no,
+					cmd->device->channel,
+					cmd->device->id,
+					cmd->device->lun, __func__,
+					scsi_bufflen(cmd), residual));
+				break;
+			}
+		}
+
+		cmd->result = DID_OK << 16 | scsi_status;
+
+		if (scsi_status != SCSI_CHECK_CONDITION)
+			break;
+
+		/* Copy Sense Data into sense buffer. */
+		qla4xxx_copy_sense(ha, sts_entry, srb);
+		break;
+
+	case SCS_INCOMPLETE:
+		/* Always set the status to DID_ERROR, since
+		 * all conditions result in that status anyway */
+		cmd->result = DID_ERROR << 16;
+		break;
+
+	case SCS_RESET_OCCURRED:
+		DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n",
+			      ha->host_no, cmd->device->channel,
+			      cmd->device->id, cmd->device->lun, __func__));
+
+		cmd->result = DID_RESET << 16;
+		break;
+
+	case SCS_ABORTED:
+		DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n",
+			      ha->host_no, cmd->device->channel,
+			      cmd->device->id, cmd->device->lun, __func__));
+
+		cmd->result = DID_RESET << 16;
+		break;
+
+	case SCS_TIMEOUT:
+		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n",
+			      ha->host_no, cmd->device->channel,
+			      cmd->device->id, cmd->device->lun));
+
+		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+
+		/*
+		 * Mark device missing so that we won't continue to send
+		 * I/O to this device.	We should get a ddb state change
+		 * AEN soon.
+		 */
+		if (iscsi_is_session_online(ddb_entry->sess))
+			qla4xxx_mark_device_missing(ddb_entry->sess);
+		break;
+
+	case SCS_DATA_UNDERRUN:
+	case SCS_DATA_OVERRUN:
+		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
+		     (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
+			DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Data overrun\n",
+				      ha->host_no,
+				      cmd->device->channel, cmd->device->id,
+				      cmd->device->lun, __func__));
+
+			cmd->result = DID_ERROR << 16;
+			break;
+		}
+
+		scsi_set_resid(cmd, residual);
+
+		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
+
+			/* Both the firmware and target reported UNDERRUN:
+			 *
+			 * MID-LAYER UNDERFLOW case:
+			 * Some kernels do not properly detect midlayer
+			 * underflow, so we manually check it and return
+			 * ERROR if the minimum required data was not
+			 * received.
+			 *
+			 * ALL OTHER cases:
+			 * Fall thru to check scsi_status
+			 */
+			if (!scsi_status && (scsi_bufflen(cmd) - residual) <
+			    cmd->underflow) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
+						   ha->host_no,
+						   cmd->device->channel,
+						   cmd->device->id,
+						   cmd->device->lun, __func__,
+						   scsi_bufflen(cmd),
+						   residual));
+
+				cmd->result = DID_ERROR << 16;
+				break;
+			}
+
+		} else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
+			   scsi_status != SAM_STAT_BUSY) {
+
+			/*
+			 * The firmware reports UNDERRUN, but the target does
+			 * not report it:
+			 *
+			 *   scsi_status     |    host_byte       device_byte
+			 *                   |     (19:16)          (7:0)
+			 *   =============   |    =========       ===========
+			 *   TASK_SET_FULL   |    DID_OK          scsi_status
+			 *   BUSY            |    DID_OK          scsi_status
+			 *   ALL OTHERS      |    DID_ERROR       scsi_status
+			 *
+			 *   Note: If scsi_status is task set full or busy,
+			 *   then this else if would fall thru to check the
+			 *   scsi_status and return DID_OK.
+			 */
+
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+					  ha->host_no,
+					  cmd->device->channel,
+					  cmd->device->id,
+					  cmd->device->lun, __func__,
+					  residual,
+					  scsi_bufflen(cmd)));
+
+			cmd->result = DID_ERROR << 16 | scsi_status;
+			goto check_scsi_status;
+		}
+
+		cmd->result = DID_OK << 16 | scsi_status;
+
+check_scsi_status:
+		if (scsi_status == SAM_STAT_CHECK_CONDITION)
+			qla4xxx_copy_sense(ha, sts_entry, srb);
+
+		break;
+
+	case SCS_DEVICE_LOGGED_OUT:
+	case SCS_DEVICE_UNAVAILABLE:
+		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE "
+		    "state: 0x%x\n", ha->host_no,
+		    cmd->device->channel, cmd->device->id,
+		    cmd->device->lun, sts_entry->completionStatus));
+		/*
+		 * Mark device missing so that we won't continue to
+		 * send I/O to this device.  We should get a ddb
+		 * state change AEN soon.
+		 */
+		if (iscsi_is_session_online(ddb_entry->sess))
+			qla4xxx_mark_device_missing(ddb_entry->sess);
+
+		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
+		break;
+
+	case SCS_QUEUE_FULL:
+		/*
+		 * SCSI Mid-Layer handles device queue full
+		 */
+		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
+		DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected "
+			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
+			      " iResp=%02x\n", ha->host_no, cmd->device->id,
+			      cmd->device->lun, __func__,
+			      sts_entry->completionStatus,
+			      sts_entry->scsiStatus, sts_entry->state_flags,
+			      sts_entry->iscsiFlags,
+			      sts_entry->iscsiResponse));
+		break;
+
+	default:
+		cmd->result = DID_ERROR << 16;
+		break;
+	}
+
+status_entry_exit:
+
+	/* complete the request, if not waiting for status_continuation pkt */
+	srb->cc_stat = sts_entry->completionStatus;
+	if (ha->status_srb == NULL)
+		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+}
+
+/**
+ * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
+					  struct passthru_status *sts_entry)
+{
+	struct iscsi_task *task;
+	struct ddb_entry *ddb_entry;
+	struct ql4_task_data *task_data;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+	itt_t itt;
+	uint32_t fw_ddb_index;
+
+	itt = sts_entry->handle;
+	fw_ddb_index = le32_to_cpu(sts_entry->target);
+
+	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+
+	if (ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
+			   __func__, sts_entry->target);
+		return;
+	}
+
+	cls_conn = ddb_entry->conn;
+	conn = cls_conn->dd_data;
+	spin_lock(&conn->session->back_lock);
+	task = iscsi_itt_to_task(conn, itt);
+	spin_unlock(&conn->session->back_lock);
+
+	if (task == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
+		return;
+	}
+
+	task_data = task->dd_data;
+	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
+	ha->iocb_cnt -= task_data->iocb_req_cnt;
+	queue_work(ha->task_wq, &task_data->task_work);
+}
+
+static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
+						     uint32_t index)
+{
+	struct mrb *mrb = NULL;
+
+	/* validate handle and remove from active array */
+	if (index >= MAX_MRB)
+		return mrb;
+
+	mrb = ha->active_mrb_array[index];
+	ha->active_mrb_array[index] = NULL;
+	if (!mrb)
+		return mrb;
+
+	/* update counters */
+	ha->iocb_cnt -= mrb->iocb_cnt;
+
+	return mrb;
+}
+
+static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
+				      struct mbox_status_iocb *mbox_sts_entry)
+{
+	struct mrb *mrb;
+	uint32_t status;
+	uint32_t data_size;
+
+	mrb = qla4xxx_del_mrb_from_active_array(ha,
+					le32_to_cpu(mbox_sts_entry->handle));
+
+	if (mrb == NULL) {
+		ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
+			   mbox_sts_entry->handle);
+		return;
+	}
+
+	switch (mrb->mbox_cmd) {
+	case MBOX_CMD_PING:
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
+				  "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
+				  __func__, mrb->mbox_cmd,
+				  mbox_sts_entry->out_mbox[0],
+				  mbox_sts_entry->out_mbox[6]));
+
+		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
+			status = ISCSI_PING_SUCCESS;
+		else
+			status = mbox_sts_entry->out_mbox[6];
+
+		data_size = sizeof(mbox_sts_entry->out_mbox);
+
+		qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
+					(uint8_t *) mbox_sts_entry->out_mbox);
+		break;
+
+	default:
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
+				  "0x%x\n", __func__, mrb->mbox_cmd));
+	}
+
+	kfree(mrb);
+}
+
+/**
+ * qla4xxx_process_response_queue - process response queue completions
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine processes response queue completions in interrupt context.
+ * Hardware_lock locked upon entry
+ **/
+void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
+{
+	uint32_t count = 0;
+	struct srb *srb = NULL;
+	struct status_entry *sts_entry;
+
+	/* Process all responses from response queue */
+	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
+		sts_entry = (struct status_entry *) ha->response_ptr;
+		count++;
+
+		/* Advance pointers for next entry */
+		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
+			ha->response_out = 0;
+			ha->response_ptr = ha->response_ring;
+		} else {
+			ha->response_out++;
+			ha->response_ptr++;
+		}
+
+		/* process entry */
+		switch (sts_entry->hdr.entryType) {
+		case ET_STATUS:
+			/* Common status */
+			qla4xxx_status_entry(ha, sts_entry);
+			break;
+
+		case ET_PASSTHRU_STATUS:
+			if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
+				qla4xxx_passthru_status_entry(ha,
+					(struct passthru_status *)sts_entry);
+			else
+				ql4_printk(KERN_ERR, ha,
+					   "%s: Invalid status received\n",
+					   __func__);
+
+			break;
+
+		case ET_STATUS_CONTINUATION:
+			qla4xxx_status_cont_entry(ha,
+				(struct status_cont_entry *) sts_entry);
+			break;
+
+		case ET_COMMAND:
+			/* ISP device queue is full. Command not
+			 * accepted by ISP.  Queue command for
+			 * later */
+
+			srb = qla4xxx_del_from_active_array(ha,
+						    le32_to_cpu(sts_entry->
+								handle));
+			if (srb == NULL)
+				goto exit_prq_invalid_handle;
+
+			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
+				      "srb %p\n", ha->host_no, __func__, srb));
+
+			/* RETRY normally by sending it back with
+			 * DID_BUS_BUSY */
+			srb->cmd->result = DID_BUS_BUSY << 16;
+			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+			break;
+
+		case ET_CONTINUE:
+			/* Just throw away the continuation entries */
+			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
+				      "ignoring\n", ha->host_no, __func__));
+			break;
+
+		case ET_MBOX_STATUS:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: mbox status IOCB\n", __func__));
+			qla4xxx_mbox_status_entry(ha,
+					(struct mbox_status_iocb *)sts_entry);
+			break;
+
+		default:
+			/*
+			 * Invalid entry in response queue, reset RISC
+			 * firmware.
+			 */
+			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
+				      "response queue \n", ha->host_no,
+				      __func__,
+				      sts_entry->hdr.entryType));
+			goto exit_prq_error;
+		}
+		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
+		wmb();
+	}
+
+	/*
+	 * Tell ISP we're done with response(s). This also clears the interrupt.
+	 */
+	ha->isp_ops->complete_iocb(ha);
+
+	return;
+
+exit_prq_invalid_handle:
+	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
+		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
+		      sts_entry->completionStatus));
+
+exit_prq_error:
+	ha->isp_ops->complete_iocb(ha);
+	set_bit(DPC_RESET_HA, &ha->dpc_flags);
+}
+
+/**
+ * qla4_83xx_loopback_in_progress - Is loopback in progress?
+ * @ha: Pointer to host adapter structure.
+ *
+ * Returns 1 if loopback is in progress, 0 otherwise.
+ **/
+static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
+{
+	int rval = 1;
+
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
+		    (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: Loopback diagnostics in progress\n",
+					  __func__));
+			rval = 1;
+		} else {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: Loopback diagnostics not in progress\n",
+					  __func__));
+			rval = 0;
+		}
+	}
+
+	return rval;
+}
+
+static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
+					uint32_t ipaddr_idx,
+					uint32_t ipaddr_fw_state)
+{
+	uint8_t ipaddr_state;
+	uint8_t ip_idx;
+
+	ip_idx = ipaddr_idx & 0xF;
+	ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state);
+
+	switch (ip_idx) {
+	case 0:
+		ha->ip_config.ipv4_addr_state = ipaddr_state;
+		break;
+	case 1:
+		ha->ip_config.ipv6_link_local_state = ipaddr_state;
+		break;
+	case 2:
+		ha->ip_config.ipv6_addr0_state = ipaddr_state;
+		break;
+	case 3:
+		ha->ip_config.ipv6_addr1_state = ipaddr_state;
+		break;
+	default:
+		ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n",
+			   __func__, ip_idx);
+	}
+}
+
+static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
+					   uint32_t *mbox_sts)
+{
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
+	       &mbox_sts[2], sizeof(uint32_t));
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
+	       &mbox_sts[3], sizeof(uint32_t));
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
+	       &mbox_sts[4], sizeof(uint32_t));
+	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
+	       &mbox_sts[5], sizeof(uint32_t));
+}
+
+/**
+ * qla4xxx_isr_decode_mailbox - decodes mailbox status
+ * @ha: Pointer to host adapter structure.
+ * @mbox_status: Mailbox status.
+ *
+ * This routine decodes the mailbox status during the ISR.
+ * Hardware_lock locked upon entry. runs in interrupt context.
+ **/
+static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
+				       uint32_t mbox_status)
+{
+	int i;
+	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+	__le32 __iomem *mailbox_out;
+	uint32_t opcode = 0;
+
+	if (is_qla8032(ha) || is_qla8042(ha))
+		mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
+	else if (is_qla8022(ha))
+		mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
+	else
+		mailbox_out = &ha->reg->mailbox[0];
+
+	if ((mbox_status == MBOX_STS_BUSY) ||
+	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
+	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
+		ha->mbox_status[0] = mbox_status;
+
+		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+			/*
+			 * Copy all mailbox registers to a temporary
+			 * location and set mailbox command done flag
+			 */
+			for (i = 0; i < ha->mbox_status_count; i++)
+				ha->mbox_status[i] = readl(&mailbox_out[i]);
+
+			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+
+			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
+				complete(&ha->mbx_intr_comp);
+		}
+	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
+		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+			mbox_sts[i] = readl(&mailbox_out[i]);
+
+		/* Immediately process the AENs that don't require much work.
+		 * Only queue the database_changed AENs */
+		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
+			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
+				    mbox_sts[i];
+			ha->aen_log.count++;
+		}
+		switch (mbox_status) {
+		case MBOX_ASTS_SYSTEM_ERROR:
+			/* Log Mailbox registers */
+			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
+			qla4xxx_dump_registers(ha);
+
+			if ((is_qla8022(ha) && ql4xdontresethba) ||
+			    ((is_qla8032(ha) || is_qla8042(ha)) &&
+			     qla4_83xx_idc_dontreset(ha))) {
+				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
+				    ha->host_no, __func__));
+			} else {
+				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			}
+			break;
+
+		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
+		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
+		case MBOX_ASTS_NVRAM_INVALID:
+		case MBOX_ASTS_IP_ADDRESS_CHANGED:
+		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
+			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
+				      "Reset HA\n", ha->host_no, mbox_status));
+			if (is_qla80XX(ha))
+				set_bit(DPC_RESET_HA_FW_CONTEXT,
+					&ha->dpc_flags);
+			else
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			break;
+
+		case MBOX_ASTS_LINK_UP:
+			set_bit(AF_LINK_UP, &ha->flags);
+			if (test_bit(AF_INIT_DONE, &ha->flags))
+				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+
+			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
+			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
+					      sizeof(mbox_sts),
+					      (uint8_t *) mbox_sts);
+
+			if ((is_qla8032(ha) || is_qla8042(ha)) &&
+			    ha->notify_link_up_comp)
+				complete(&ha->link_up_comp);
+
+			break;
+
+		case MBOX_ASTS_LINK_DOWN:
+			clear_bit(AF_LINK_UP, &ha->flags);
+			if (test_bit(AF_INIT_DONE, &ha->flags)) {
+				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+				qla4xxx_wake_dpc(ha);
+			}
+
+			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
+			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
+					      sizeof(mbox_sts),
+					      (uint8_t *) mbox_sts);
+			break;
+
+		case MBOX_ASTS_HEARTBEAT:
+			ha->seconds_since_last_heartbeat = 0;
+			break;
+
+		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
+			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
+				      "ACQUIRED\n", ha->host_no, mbox_status));
+			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+			break;
+
+		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
+		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
+							   * mode
+							   * only */
+		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
+		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
+		case MBOX_ASTS_SUBNET_STATE_CHANGE:
+		case MBOX_ASTS_DUPLICATE_IP:
+			/* No action */
+			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
+				      mbox_status));
+			break;
+
+		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
+			printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
+			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
+			    mbox_sts[2], mbox_sts[3]);
+
+			qla4xxx_update_ipaddr_state(ha, mbox_sts[5],
+						    mbox_sts[3]);
+			/* mbox_sts[2] = Old ACB state
+			 * mbox_sts[3] = new ACB state */
+			if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
+			    ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
+			     (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
+				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+			} else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
+				   (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
+				if (is_qla80XX(ha))
+					set_bit(DPC_RESET_HA_FW_CONTEXT,
+						&ha->dpc_flags);
+				else
+					set_bit(DPC_RESET_HA, &ha->dpc_flags);
+			} else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
+					   ha->host_no, __func__);
+			} else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
+				complete(&ha->disable_acb_comp);
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
+					   ha->host_no, __func__);
+			}
+			break;
+
+		case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
+		case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
+		case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
+			/* No action */
+			DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
+					  ha->host_no, mbox_status));
+			break;
+
+		case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, IPv6 ERROR, "
+					  "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			break;
+
+		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
+		case MBOX_ASTS_DNS:
+			/* No action */
+			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
+				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
+				      ha->host_no, mbox_sts[0],
+				      mbox_sts[1], mbox_sts[2]));
+			break;
+
+		case MBOX_ASTS_SELF_TEST_FAILED:
+		case MBOX_ASTS_LOGIN_FAILED:
+			/* No action */
+			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
+				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
+				      ha->host_no, mbox_sts[0], mbox_sts[1],
+				      mbox_sts[2], mbox_sts[3]));
+			break;
+
+		case MBOX_ASTS_DATABASE_CHANGED:
+			/* Queue AEN information and process it in the DPC
+			 * routine */
+			if (ha->aen_q_count > 0) {
+
+				/* decrement available counter */
+				ha->aen_q_count--;
+
+				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+					ha->aen_q[ha->aen_in].mbox_sts[i] =
+					    mbox_sts[i];
+
+				/* print debug message */
+				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
+					      "mb1:0x%x mb2:0x%x mb3:0x%x "
+					      "mb4:0x%x mb5:0x%x\n",
+					      ha->host_no, ha->aen_in,
+					      mbox_sts[0], mbox_sts[1],
+					      mbox_sts[2], mbox_sts[3],
+					      mbox_sts[4], mbox_sts[5]));
+
+				/* advance pointer */
+				ha->aen_in++;
+				if (ha->aen_in == MAX_AEN_ENTRIES)
+					ha->aen_in = 0;
+
+				/* The DPC routine will process the aen */
+				set_bit(DPC_AEN, &ha->dpc_flags);
+			} else {
+				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
+					      "overflowed!  AEN LOST!!\n",
+					      ha->host_no, __func__,
+					      mbox_sts[0]));
+
+				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
+					      ha->host_no));
+
+				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
+					DEBUG2(printk("AEN[%d] %04x %04x %04x "
+						      "%04x\n", i, mbox_sts[0],
+						      mbox_sts[1], mbox_sts[2],
+						      mbox_sts[3]));
+				}
+			}
+			break;
+
+		case MBOX_ASTS_TXSCVR_INSERTED:
+			DEBUG2(printk(KERN_WARNING
+			    "scsi%ld: AEN %04x Transceiver"
+			    " inserted\n",  ha->host_no, mbox_sts[0]));
+			break;
+
+		case MBOX_ASTS_TXSCVR_REMOVED:
+			DEBUG2(printk(KERN_WARNING
+			    "scsi%ld: AEN %04x Transceiver"
+			    " removed\n",  ha->host_no, mbox_sts[0]));
+			break;
+
+		case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
+			if (is_qla8032(ha) || is_qla8042(ha)) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+						  ha->host_no, mbox_sts[0],
+						  mbox_sts[1], mbox_sts[2],
+						  mbox_sts[3], mbox_sts[4]));
+				opcode = mbox_sts[1] >> 16;
+				if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+				    (opcode == MBOX_CMD_PORT_RESET)) {
+					set_bit(DPC_POST_IDC_ACK,
+						&ha->dpc_flags);
+					ha->idc_info.request_desc = mbox_sts[1];
+					ha->idc_info.info1 = mbox_sts[2];
+					ha->idc_info.info2 = mbox_sts[3];
+					ha->idc_info.info3 = mbox_sts[4];
+					qla4xxx_wake_dpc(ha);
+				}
+			}
+			break;
+
+		case MBOX_ASTS_IDC_COMPLETE:
+			if (is_qla8032(ha) || is_qla8042(ha)) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
+						  ha->host_no, mbox_sts[0],
+						  mbox_sts[1], mbox_sts[2],
+						  mbox_sts[3], mbox_sts[4]));
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "scsi:%ld: AEN %04x IDC Complete notification\n",
+						  ha->host_no, mbox_sts[0]));
+
+				opcode = mbox_sts[1] >> 16;
+				if (ha->notify_idc_comp)
+					complete(&ha->idc_comp);
+
+				if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
+				    (opcode == MBOX_CMD_PORT_RESET))
+					ha->idc_info.info2 = mbox_sts[3];
+
+				if (qla4_83xx_loopback_in_progress(ha)) {
+					set_bit(AF_LOOPBACK, &ha->flags);
+				} else {
+					clear_bit(AF_LOOPBACK, &ha->flags);
+					if (ha->saved_acb)
+						set_bit(DPC_RESTORE_ACB,
+							&ha->dpc_flags);
+				}
+				qla4xxx_wake_dpc(ha);
+			}
+			break;
+
+		case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
+					  ha->host_no, mbox_sts[0]));
+			qla4xxx_default_router_changed(ha, mbox_sts);
+			break;
+
+		case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
+					  ha->host_no, mbox_sts[0]));
+			/* new IDC timeout */
+			ha->idc_extend_tmo = mbox_sts[1];
+			break;
+
+		case MBOX_ASTS_INITIALIZATION_FAILED:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
+					  ha->host_no, mbox_sts[0],
+					  mbox_sts[3]));
+			break;
+
+		case MBOX_ASTS_SYSTEM_WARNING_EVENT:
+			DEBUG2(ql4_printk(KERN_WARNING, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			break;
+
+		case MBOX_ASTS_DCBX_CONF_CHANGE:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
+					  ha->host_no, mbox_sts[0]));
+			break;
+
+		default:
+			DEBUG2(printk(KERN_WARNING
+				      "scsi%ld: AEN %04x UNKNOWN\n",
+				      ha->host_no, mbox_sts[0]));
+			break;
+		}
+	} else {
+		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
+			      ha->host_no, mbox_status));
+
+		ha->mbox_status[0] = mbox_status;
+	}
+}
+
+void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
+					 uint32_t intr_status)
+{
+	/* Process mailbox/asynch event interrupt.*/
+	if (intr_status) {
+		qla4xxx_isr_decode_mailbox(ha,
+				readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+		/* clear the interrupt */
+		writel(0, &ha->qla4_83xx_reg->risc_intr);
+	} else {
+		qla4xxx_process_response_queue(ha);
+	}
+
+	/* clear the interrupt */
+	writel(0, &ha->qla4_83xx_reg->mb_int_mask);
+}
+
+/**
+ * qla4_82xx_interrupt_service_routine - isr
+ * @ha: pointer to host adapter structure.
+ *
+ * This is the main interrupt service routine.
+ * hardware_lock locked upon entry. runs in interrupt context.
+ **/
+void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
+    uint32_t intr_status)
+{
+	/* Process response queue interrupt. */
+	if ((intr_status & HSRX_RISC_IOCB_INT) &&
+	    test_bit(AF_INIT_DONE, &ha->flags))
+		qla4xxx_process_response_queue(ha);
+
+	/* Process mailbox/asynch event interrupt.*/
+	if (intr_status & HSRX_RISC_MB_INT)
+		qla4xxx_isr_decode_mailbox(ha,
+		    readl(&ha->qla4_82xx_reg->mailbox_out[0]));
+
+	/* clear the interrupt */
+	writel(0, &ha->qla4_82xx_reg->host_int);
+	readl(&ha->qla4_82xx_reg->host_int);
+}
+
+/**
+ * qla4xxx_interrupt_service_routine - isr
+ * @ha: pointer to host adapter structure.
+ * @intr_status: Local interrupt status/type.
+ *
+ * This is the main interrupt service routine.
+ * hardware_lock locked upon entry. runs in interrupt context.
+ **/
+void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
+				       uint32_t intr_status)
+{
+	/* Process response queue interrupt. */
+	if (intr_status & CSR_SCSI_COMPLETION_INTR)
+		qla4xxx_process_response_queue(ha);
+
+	/* Process mailbox/asynch event interrupt. */
+	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
+		qla4xxx_isr_decode_mailbox(ha,
+					   readl(&ha->reg->mailbox[0]));
+
+		/* Clear Mailbox Interrupt */
+		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+		       &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+}
+
+/**
+ * qla4_82xx_spurious_interrupt - processes spurious interrupt
+ * @ha: pointer to host adapter structure.
+ * @reqs_count: number of requests already serviced for this interrupt;
+ *	the interrupt is only counted as spurious when this is zero.
+ **/
+static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
+    uint8_t reqs_count)
+{
+	if (reqs_count)
+		return;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
+	if (is_qla8022(ha)) {
+		writel(0, &ha->qla4_82xx_reg->host_int);
+		if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
+			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+			    0xfbff);
+	}
+	ha->spurious_int_count++;
+}
+
+/**
+ * qla4xxx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha;
+	uint32_t intr_status;
+	unsigned long flags = 0;
+	uint8_t reqs_count = 0;
+
+	ha = (struct scsi_qla_host *) dev_id;
+	if (!ha) {
+		DEBUG2(printk(KERN_INFO
+			      "qla4xxx: Interrupt with NULL host ptr\n"));
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	ha->isr_count++;
+	/*
+	 * Repeatedly service interrupts up to a maximum of
+	 * MAX_REQS_SERVICED_PER_INTR
+	 */
+	while (1) {
+		/*
+		 * Read interrupt status
+		 */
+		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
+		    ha->response_out)
+			intr_status = CSR_SCSI_COMPLETION_INTR;
+		else
+			intr_status = readl(&ha->reg->ctrl_status);
+
+		if ((intr_status &
+		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
+			if (reqs_count == 0)
+				ha->spurious_int_count++;
+			break;
+		}
+
+		if (intr_status & CSR_FATAL_ERROR) {
+			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
+				      "Status 0x%04x\n", ha->host_no,
+				      readl(isp_port_error_status(ha))));
+
+			/* Issue Soft Reset to clear this error condition.
+			 * This will prevent the RISC from repeatedly
+			 * interrupting the driver; thus, allowing the DPC to
+			 * get scheduled to continue error recovery.
+			 * NOTE: Disabling RISC interrupts does not work in
+			 * this case, as CSR_FATAL_ERROR overrides
+			 * CSR_SCSI_INTR_ENABLE */
+			if ((readl(&ha->reg->ctrl_status) &
+			     CSR_SCSI_RESET_INTR) == 0) {
+				writel(set_rmask(CSR_SOFT_RESET),
+				       &ha->reg->ctrl_status);
+				readl(&ha->reg->ctrl_status);
+			}
+
+			writel(set_rmask(CSR_FATAL_ERROR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+
+			__qla4xxx_disable_intrs(ha);
+
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+
+			break;
+		} else if (intr_status & CSR_SCSI_RESET_INTR) {
+			clear_bit(AF_ONLINE, &ha->flags);
+			__qla4xxx_disable_intrs(ha);
+
+			writel(set_rmask(CSR_SCSI_RESET_INTR),
+			       &ha->reg->ctrl_status);
+			readl(&ha->reg->ctrl_status);
+
+			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
+				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+
+			break;
+		} else if (intr_status & INTR_PENDING) {
+			ha->isp_ops->interrupt_service_routine(ha, intr_status);
+			ha->total_io_count++;
+			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+				break;
+		}
+	}
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * qla4_82xx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha = dev_id;
+	uint32_t intr_status;
+	uint32_t status;
+	unsigned long flags = 0;
+	uint8_t reqs_count = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
+	ha->isr_count++;
+	status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+	if (!(status & ha->nx_legacy_intr.int_vec_bit))
+		return IRQ_NONE;
+
+	status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
+	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
+		DEBUG7(ql4_printk(KERN_INFO, ha,
+				  "%s legacy Int not triggered\n", __func__));
+		return IRQ_NONE;
+	}
+
+	/* clear the interrupt */
+	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+	/* read twice to ensure write is flushed */
+	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	while (1) {
+		if (!(readl(&ha->qla4_82xx_reg->host_int) &
+		    ISRX_82XX_RISC_INT)) {
+			qla4_82xx_spurious_interrupt(ha, reqs_count);
+			break;
+		}
+		intr_status =  readl(&ha->qla4_82xx_reg->host_status);
+		if ((intr_status &
+		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0)  {
+			qla4_82xx_spurious_interrupt(ha, reqs_count);
+			break;
+		}
+
+		ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+		/* Enable Interrupt */
+		qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+			break;
+	}
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return IRQ_HANDLED;
+}
+
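+/*
+ * Legacy Interrupt Pointer register bit layout (used below in
+ * qla4_83xx_intr_handler): bit 31 = legacy interrupt valid,
+ * bit 30 = interrupt de-assertion still in progress,
+ * bits [19:16] = PCI function that raised the interrupt.
+ */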
+#define LEG_INT_PTR_B31		(1 << 31)
+#define LEG_INT_PTR_B30		(1 << 30)
+#define PF_BITS_MASK		(0xF << 16)
+
+/**
+ * qla4_83xx_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ **/
+irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha = dev_id;
+	uint32_t leg_int_ptr = 0;
+	unsigned long flags = 0;
+
+	ha->isr_count++;
+	leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+
+	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+	if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
+		DEBUG7(ql4_printk(KERN_ERR, ha,
+				  "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+				  __func__));
+		return IRQ_NONE;
+	}
+
+	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+	if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
+		DEBUG7(ql4_printk(KERN_ERR, ha,
+				  "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+				  __func__, (leg_int_ptr & PF_BITS_MASK),
+				  ha->pf_bit));
+		return IRQ_NONE;
+	}
+
+	/* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
+	 * Control register and poll till Legacy Interrupt Pointer register
+	 * bit30 is 0.
+	 */
+	writel(0, &ha->qla4_83xx_reg->leg_int_trig);
+	do {
+		leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
+		if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
+			break;
+	} while (leg_int_ptr & LEG_INT_PTR_B30);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
+	ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla4_8xxx_msi_handler(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha;
+
+	ha = (struct scsi_qla_host *) dev_id;
+	if (!ha) {
+		DEBUG2(printk(KERN_INFO
+		    "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
+		return IRQ_NONE;
+	}
+
+	ha->isr_count++;
+	/* clear the interrupt */
+	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+	/* read twice to ensure write is flushed */
+	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
+
+	return qla4_8xxx_default_intr_handler(irq, dev_id);
+}
+
+static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha = dev_id;
+	unsigned long flags;
+	uint32_t ival = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	ival = readl(&ha->qla4_83xx_reg->risc_intr);
+	if (ival == 0) {
+		ql4_printk(KERN_INFO, ha,
+			   "%s: It is a spurious mailbox interrupt!\n",
+			   __func__);
+		ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+		ival &= ~INT_MASK_FW_MB;
+		writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+		goto exit;
+	}
+
+	qla4xxx_isr_decode_mailbox(ha,
+				   readl(&ha->qla4_83xx_reg->mailbox_out[0]));
+	writel(0, &ha->qla4_83xx_reg->risc_intr);
+	ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
+	ival &= ~INT_MASK_FW_MB;
+	writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
+	ha->isr_count++;
+exit:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return IRQ_HANDLED;
+}
+
+/**
+ * qla4_8xxx_default_intr_handler - hardware interrupt handler.
+ * @irq: Unused
+ * @dev_id: Pointer to host adapter structure
+ *
+ * This interrupt handler is called directly for MSI-X, and
+ * called indirectly for MSI.
+ **/
+irqreturn_t
+qla4_8xxx_default_intr_handler(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha = dev_id;
+	unsigned long   flags;
+	uint32_t intr_status;
+	uint8_t reqs_count = 0;
+
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		qla4_83xx_mailbox_intr_handler(irq, dev_id);
+	} else {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		while (1) {
+			if (!(readl(&ha->qla4_82xx_reg->host_int) &
+			    ISRX_82XX_RISC_INT)) {
+				qla4_82xx_spurious_interrupt(ha, reqs_count);
+				break;
+			}
+
+			intr_status =  readl(&ha->qla4_82xx_reg->host_status);
+			if ((intr_status &
+			    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
+				qla4_82xx_spurious_interrupt(ha, reqs_count);
+				break;
+			}
+
+			ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
+				break;
+		}
+		ha->isr_count++;
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+	return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
+{
+	struct scsi_qla_host *ha = dev_id;
+	unsigned long flags;
+	int intr_status;
+	uint32_t ival = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
+		if (ival == 0) {
+			ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
+				   __func__);
+			goto exit_msix_rsp_q;
+		}
+		qla4xxx_process_response_queue(ha);
+		writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
+	} else {
+		intr_status = readl(&ha->qla4_82xx_reg->host_status);
+		if (intr_status & HSRX_RISC_IOCB_INT) {
+			qla4xxx_process_response_queue(ha);
+			writel(0, &ha->qla4_82xx_reg->host_int);
+		} else {
+			ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n",
+				   __func__);
+			goto exit_msix_rsp_q;
+		}
+	}
+	ha->isr_count++;
+exit_msix_rsp_q:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return IRQ_HANDLED;
+}
+
+/**
+ * qla4xxx_process_aen - processes AENs generated by firmware
+ * @ha: pointer to host adapter structure.
+ * @process_aen: type of AENs to process
+ *
+ * Processes specific types of Asynchronous Events generated by firmware.
+ * The type of AENs to process is specified by process_aen and can be
+ *	PROCESS_ALL_AENS	 0
+ *	FLUSH_DDB_CHANGED_AENS	 1
+ *	RELOGIN_DDB_CHANGED_AENS 2
+ **/
+void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
+{
+	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+	struct aen *aen;
+	int i;
+	unsigned long flags;
+
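+	/* Drain the AEN ring under hardware_lock, dropping the lock
+	 * while each entry is processed so the ISR can keep queueing
+	 * new AENs. */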
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	while (ha->aen_out != ha->aen_in) {
+		aen = &ha->aen_q[ha->aen_out];
+		/* copy aen information to local structure */
+		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+			mbox_sts[i] = aen->mbox_sts[i];
+
+		ha->aen_q_count++;
+		ha->aen_out++;
+
+		if (ha->aen_out == MAX_AEN_ENTRIES)
+			ha->aen_out = 0;
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
+			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
+			(ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
+			mbox_sts[0], mbox_sts[1], mbox_sts[2],
+			mbox_sts[3], mbox_sts[4]));
+
+		switch (mbox_sts[0]) {
+		case MBOX_ASTS_DATABASE_CHANGED:
+			switch (process_aen) {
+			case FLUSH_DDB_CHANGED_AENS:
+				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
+					      "[%d] state=%04x FLUSHED!\n",
+					      ha->host_no, ha->aen_out,
+					      mbox_sts[0], mbox_sts[2],
+					      mbox_sts[3]));
+				break;
+			case PROCESS_ALL_AENS:
+			default:
+				/* Specific device. */
+				if (mbox_sts[1] == 1)
+					qla4xxx_process_ddb_changed(ha,
+						mbox_sts[2], mbox_sts[3],
+						mbox_sts[4]);
+				break;
+			}
+		}
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int qla4xxx_request_irqs(struct scsi_qla_host *ha)
+{
+	int ret = 0;
+	int rval = QLA_ERROR;
+
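+	/*
+	 * Interrupt setup policy: try MSI-X first, then fall back to MSI
+	 * and finally to legacy INTx.  ISP40xx parts use INTx only, and
+	 * ISP82xx parts do not support legacy INTx at all.
+	 */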
+	if (is_qla40XX(ha))
+		goto try_intx;
+
+	if (ql4xenablemsix == 2) {
+		/* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
+		if (is_qla8032(ha) || is_qla8042(ha)) {
+			ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n",
+				   __func__, ha->pdev->device);
+			goto try_intx;
+		}
+		goto try_msi;
+	}
+
+	if (ql4xenablemsix != 1)
+		goto try_intx;
+
+	/* Trying MSI-X */
+	ret = qla4_8xxx_enable_msix(ha);
+	if (!ret) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+		    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
+		goto irq_attached;
+	} else {
+		if (is_qla8032(ha) || is_qla8042(ha)) {
+			ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n",
+				   __func__, ha->pdev->device, ret);
+			goto try_intx;
+		}
+	}
+
+	ql4_printk(KERN_WARNING, ha,
+	    "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
+
+try_msi:
+	/* Trying MSI */
+	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+	if (ret > 0) {
+		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
+			0, DRIVER_NAME, ha);
+		if (!ret) {
+			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
+			goto irq_attached;
+		} else {
+			ql4_printk(KERN_WARNING, ha,
+			    "MSI: Failed to reserve interrupt %d "
+			    "already in use.\n", ha->pdev->irq);
+			pci_free_irq_vectors(ha->pdev);
+		}
+	}
+
+try_intx:
+	if (is_qla8022(ha)) {
+		ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
+			   __func__);
+		goto irq_not_attached;
+	}
+
+	/* Trying INTx */
+	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
+	    IRQF_SHARED, DRIVER_NAME, ha);
+	if (!ret) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
+		goto irq_attached;
+
+	} else {
+		ql4_printk(KERN_WARNING, ha,
+		    "INTx: Failed to reserve interrupt %d already in"
+		    " use.\n", ha->pdev->irq);
+		goto irq_not_attached;
+	}
+
+irq_attached:
+	set_bit(AF_IRQ_ATTACHED, &ha->flags);
+	ha->host->irq = ha->pdev->irq;
+	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
+		   __func__, ha->pdev->irq);
+	rval = QLA_SUCCESS;
+irq_not_attached:
+	return rval;
+}
+
+void qla4xxx_free_irqs(struct scsi_qla_host *ha)
+{
+	if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+		return;
+
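+	/* With MSI-X enabled two vectors are in use (presumably vector 1
+	 * is the response-queue vector; see qla4_8xxx_msix_rsp_q above),
+	 * so free both before releasing the vectors. */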
+	if (ha->pdev->msix_enabled)
+		free_irq(pci_irq_vector(ha->pdev, 1), ha);
+	free_irq(pci_irq_vector(ha->pdev, 0), ha);
+	pci_free_irq_vectors(ha->pdev);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_mbx.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_mbx.c
new file mode 100644
index 0000000..c402fc5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -0,0 +1,2458 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include <linux/ctype.h>
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+#include "ql4_version.h"
+
+void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+			    int in_count)
+{
+	int i;
+
+	/* Load all mailbox registers, except mailbox 0. */
+	for (i = 1; i < in_count; i++)
+		writel(mbx_cmd[i], &ha->reg->mailbox[i]);
+
+	/*
+	 * Write mailbox 0 last and ring the RISC doorbell; the readl()s
+	 * flush the posted PCI writes to the adapter.
+	 */
+	writel(mbx_cmd[0], &ha->reg->mailbox[0]);
+	readl(&ha->reg->mailbox[0]);
+	writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
+	readl(&ha->reg->ctrl_status);
+}
+
+void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+	int intr_status;
+
+	intr_status = readl(&ha->reg->ctrl_status);
+	if (intr_status & INTR_PENDING) {
+		/*
+		 * Service the interrupt.
+		 * The ISR will save the mailbox status registers
+		 * to a temporary storage location in the adapter structure.
+		 */
+		ha->mbox_status_count = out_count;
+		ha->isp_ops->interrupt_service_routine(ha, intr_status);
+	}
+}
+
+/**
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
+ * @ha: Pointer to host adapter structure.
+ *
+ * Returns 1 for polling mode, 0 for non-polling (interrupt) mode.
+ **/
+static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
+{
+	int rval = 1;
+
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+		    test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
+			rval = 0;
+	} else {
+		if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+		    test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+		    test_bit(AF_ONLINE, &ha->flags) &&
+		    !test_bit(AF_HA_REMOVAL, &ha->flags))
+			rval = 0;
+	}
+
+	return rval;
+}
+
+/**
+ * qla4xxx_mailbox_command - issues mailbox commands
+ * @ha: Pointer to host adapter structure.
+ * @inCount: number of mailbox registers to load.
+ * @outCount: number of mailbox registers to return.
+ * @mbx_cmd: data pointer for mailbox in registers.
+ * @mbx_sts: data pointer for mailbox out registers.
+ *
+ * This routine issues mailbox commands and waits for completion.
+ * If outCount is 0, this routine completes successfully WITHOUT waiting
+ * for the mailbox command to complete.
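+ *
+ * Typical call pattern (see the callers later in this file, e.g.
+ * qla4xxx_get_firmware_state):
+ *
+ *	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ *	memset(&mbox_sts, 0, sizeof(mbox_sts));
+ *	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
+ *	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, out_count,
+ *					 &mbox_cmd[0], &mbox_sts[0]);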
+ **/
+int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
+			    uint8_t outCount, uint32_t *mbx_cmd,
+			    uint32_t *mbx_sts)
+{
+	int status = QLA_ERROR;
+	uint8_t i;
+	u_long wait_count;
+	unsigned long flags = 0;
+	uint32_t dev_state;
+
+	/* Make sure that pointers are valid */
+	if (!mbx_cmd || !mbx_sts) {
+		DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
+			      "pointer\n", ha->host_no, __func__));
+		return status;
+	}
+
+	if (is_qla40XX(ha)) {
+		if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+			DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+					  "prematurely completing mbx cmd as "
+					  "adapter removal detected\n",
+					  ha->host_no, __func__));
+			return status;
+		}
+	}
+
+	if ((is_aer_supported(ha)) &&
+	    (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
+		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
+		    "timeout MBX Exiting.\n", ha->host_no, __func__));
+		return status;
+	}
+
+	/*
+	 * Acquire exclusive access to the mailbox interface: poll every
+	 * 10 ms, for up to MBOX_TOV seconds (MBOX_TOV * 100 iterations).
+	 */
+	wait_count = MBOX_TOV * 100;
+
+	while (wait_count--) {
+		mutex_lock(&ha->mbox_sem);
+		if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+			set_bit(AF_MBOX_COMMAND, &ha->flags);
+			mutex_unlock(&ha->mbox_sem);
+			break;
+		}
+		mutex_unlock(&ha->mbox_sem);
+		if (!wait_count) {
+			DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
+				ha->host_no, __func__));
+			return status;
+		}
+		msleep(10);
+	}
+
+	if (is_qla80XX(ha)) {
+		if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+			DEBUG2(ql4_printk(KERN_WARNING, ha,
+					  "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+					  ha->host_no, __func__));
+			goto mbox_exit;
+		}
+		/* Do not send any mbx cmd if h/w is in failed state*/
+		ha->isp_ops->idc_lock(ha);
+		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+		ha->isp_ops->idc_unlock(ha);
+		if (dev_state == QLA8XXX_DEV_FAILED) {
+			ql4_printk(KERN_WARNING, ha,
+				   "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+				   ha->host_no, __func__);
+			goto mbox_exit;
+		}
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	ha->mbox_status_count = outCount;
+	for (i = 0; i < outCount; i++)
+		ha->mbox_status[i] = 0;
+
+	/* Queue the mailbox command to the firmware */
+	ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Wait for completion */
+
+	/*
+	 * If we don't want status, don't wait for the mailbox command to
+	 * complete.  For example, MBOX_CMD_RESET_FW doesn't return status,
+	 * you must poll the inbound Interrupt Mask for completion.
+	 */
+	if (outCount == 0) {
+		status = QLA_SUCCESS;
+		goto mbox_exit;
+	}
+
+	/*
+	 * Wait for completion: Poll or completion queue
+	 */
+	if (qla4xxx_is_intr_poll_mode(ha)) {
+		/* Poll for command to complete */
+		wait_count = jiffies + MBOX_TOV * HZ;
+		while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
+			if (time_after_eq(jiffies, wait_count))
+				break;
+			/*
+			 * Service the interrupt.
+			 * The ISR will save the mailbox status registers
+			 * to a temporary storage location in the adapter
+			 * structure.
+			 */
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			ha->isp_ops->process_mailbox_interrupt(ha, outCount);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			msleep(10);
+		}
+	} else {
+		/* Do not poll for completion. Use completion queue */
+		set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+		wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
+		clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+	}
+
+	/* Check for mailbox timeout. */
+	if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
+		if (is_qla80XX(ha) &&
+		    test_bit(AF_FW_RECOVERY, &ha->flags)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+			    "scsi%ld: %s: prematurely completing mbx cmd as "
+			    "firmware recovery detected\n",
+			    ha->host_no, __func__));
+			goto mbox_exit;
+		}
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n",
+			   ha->host_no, mbx_cmd[0]);
+		ha->mailbox_timeout_count++;
+		mbx_sts[0] = (-1);
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		if (is_qla8022(ha)) {
+			ql4_printk(KERN_INFO, ha,
+				   "disabling pause transmit on port 0 & 1.\n");
+			qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+					CRB_NIU_XG_PAUSE_CTL_P0 |
+					CRB_NIU_XG_PAUSE_CTL_P1);
+		} else if (is_qla8032(ha) || is_qla8042(ha)) {
+			ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
+				   __func__);
+			qla4_83xx_disable_pause(ha);
+		}
+		goto mbox_exit;
+	}
+
+	/*
+	 * Copy the mailbox out registers to the caller's mailbox in/out
+	 * structure.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (i = 0; i < outCount; i++)
+		mbx_sts[i] = ha->mbox_status[i];
+
+	/* Set return status and error flags (if applicable). */
+	switch (ha->mbox_status[0]) {
+	case MBOX_STS_COMMAND_COMPLETE:
+		status = QLA_SUCCESS;
+		break;
+
+	case MBOX_STS_INTERMEDIATE_COMPLETION:
+		status = QLA_SUCCESS;
+		break;
+
+	case MBOX_STS_BUSY:
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
+			   ha->host_no, __func__, mbx_cmd[0]);
+		ha->mailbox_timeout_count++;
+		break;
+
+	default:
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n",
+			   ha->host_no, __func__, mbx_cmd[0], mbx_sts[0],
+			   mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4],
+			   mbx_sts[5], mbx_sts[6], mbx_sts[7]);
+		break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+mbox_exit:
+	mutex_lock(&ha->mbox_sem);
+	clear_bit(AF_MBOX_COMMAND, &ha->flags);
+	mutex_unlock(&ha->mbox_sem);
+	clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+
+	return status;
+}
+
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+				  dma_addr_t phys_addr)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+	mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+	mbox_cmd[2] = LSDW(phys_addr);
+	mbox_cmd[3] = MSDW(phys_addr);
+	mbox_cmd[4] = ha->fw_dump_tmplt_size;
+	mbox_cmd[5] = 0;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+				  ha->host_no, __func__, mbox_cmd[0],
+				  mbox_sts[0], mbox_sts[1]));
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+	mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status == QLA_SUCCESS) {
+		ha->fw_dump_tmplt_size = mbox_sts[1];
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: sts[0]=0x%04x, template  size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+				  __func__, mbox_sts[0], mbox_sts[1],
+				  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+				  mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+		if (ha->fw_dump_tmplt_size == 0)
+			status = QLA_ERROR;
+	} else {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+			   __func__, mbox_sts[0], mbox_sts[1]);
+		status = QLA_ERROR;
+	}
+
+	return status;
+}
+
+void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
+{
+	set_bit(AF_FW_RECOVERY, &ha->flags);
+	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: set FW RECOVERY!\n",
+	    ha->host_no, __func__);
+
+	if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
+		if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags)) {
+			complete(&ha->mbx_intr_comp);
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
+			    "recovery, doing premature completion of "
+			    "mbx cmd\n", ha->host_no, __func__);
+
+		} else {
+			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Due to fw "
+			    "recovery, doing premature completion of "
+			    "polling mbx cmd\n", ha->host_no, __func__);
+		}
+	}
+}
+
+static uint8_t
+qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+
+	if (is_qla8022(ha))
+		qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+
+	mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
+	mbox_cmd[1] = 0;
+	mbox_cmd[2] = LSDW(init_fw_cb_dma);
+	mbox_cmd[3] = MSDW(init_fw_cb_dma);
+	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+	if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+			      "MBOX_CMD_INITIALIZE_FIRMWARE"
+			      " failed w/ status %04X\n",
+			      ha->host_no, __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
+uint8_t
+qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
+{
+	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
+	mbox_cmd[2] = LSDW(init_fw_cb_dma);
+	mbox_cmd[3] = MSDW(init_fw_cb_dma);
+	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+	if (qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: "
+			      "MBOX_CMD_GET_INIT_FW_CTRL_BLOCK"
+			      " failed w/ status %04X\n",
+			      ha->host_no, __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
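+/* Map a firmware IP_ADDRSTATE_* value to the corresponding iSCSI
+ * transport ISCSI_IPDDRESS_STATE_* value. */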
+uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state)
+{
+	uint8_t ipaddr_state;
+
+	switch (fw_ipaddr_state) {
+	case IP_ADDRSTATE_UNCONFIGURED:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
+		break;
+	case IP_ADDRSTATE_INVALID:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID;
+		break;
+	case IP_ADDRSTATE_ACQUIRING:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING;
+		break;
+	case IP_ADDRSTATE_TENTATIVE:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE;
+		break;
+	case IP_ADDRSTATE_DEPRICATED:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED;
+		break;
+	case IP_ADDRSTATE_PREFERRED:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_VALID;
+		break;
+	case IP_ADDRSTATE_DISABLING:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING;
+		break;
+	default:
+		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED;
+	}
+	return ipaddr_state;
+}
+
+static void
+qla4xxx_update_local_ip(struct scsi_qla_host *ha,
+			struct addr_ctrl_blk *init_fw_cb)
+{
+	ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+	ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+	ha->ip_config.ipv4_addr_state =
+			qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);
+	ha->ip_config.eth_mtu_size =
+				le16_to_cpu(init_fw_cb->eth_mtu_size);
+	ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
+
+	if (ha->acb_version == ACB_SUPPORTED) {
+		ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
+		ha->ip_config.ipv6_addl_options =
+				le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
+		ha->ip_config.ipv6_tcp_options =
+				le16_to_cpu(init_fw_cb->ipv6_tcp_opts);
+	}
+
+	/* Save IPv4 Address Info */
+	memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
+	       min(sizeof(ha->ip_config.ip_address),
+		   sizeof(init_fw_cb->ipv4_addr)));
+	memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
+	       min(sizeof(ha->ip_config.subnet_mask),
+		   sizeof(init_fw_cb->ipv4_subnet)));
+	memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
+	       min(sizeof(ha->ip_config.gateway),
+		   sizeof(init_fw_cb->ipv4_gw_addr)));
+
+	ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
+	ha->ip_config.control = init_fw_cb->control;
+	ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf;
+	ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos;
+	ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid;
+	ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len;
+	memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid,
+	       min(sizeof(ha->ip_config.ipv4_alt_cid),
+		   sizeof(init_fw_cb->ipv4_dhcp_alt_cid)));
+	ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len;
+	memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid,
+	       min(sizeof(ha->ip_config.ipv4_vid),
+		   sizeof(init_fw_cb->ipv4_dhcp_vid)));
+	ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl;
+	ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
+	ha->ip_config.abort_timer = init_fw_cb->abort_timer;
+	ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts);
+	ha->ip_config.iscsi_max_pdu_size =
+				le16_to_cpu(init_fw_cb->iscsi_max_pdu_size);
+	ha->ip_config.iscsi_first_burst_len =
+				le16_to_cpu(init_fw_cb->iscsi_fburst_len);
+	ha->ip_config.iscsi_max_outstnd_r2t =
+				le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t);
+	ha->ip_config.iscsi_max_burst_len =
+				le16_to_cpu(init_fw_cb->iscsi_max_burst_len);
+	memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name,
+	       min(sizeof(ha->ip_config.iscsi_name),
+		   sizeof(init_fw_cb->iscsi_name)));
+
+	if (is_ipv6_enabled(ha)) {
+		/* Save IPv6 Address */
+		ha->ip_config.ipv6_link_local_state =
+		  qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);
+		ha->ip_config.ipv6_addr0_state =
+			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);
+		ha->ip_config.ipv6_addr1_state =
+			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state);
+
+		switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) {
+		case IPV6_RTRSTATE_UNKNOWN:
+			ha->ip_config.ipv6_default_router_state =
+						ISCSI_ROUTER_STATE_UNKNOWN;
+			break;
+		case IPV6_RTRSTATE_MANUAL:
+			ha->ip_config.ipv6_default_router_state =
+						ISCSI_ROUTER_STATE_MANUAL;
+			break;
+		case IPV6_RTRSTATE_ADVERTISED:
+			ha->ip_config.ipv6_default_router_state =
+						ISCSI_ROUTER_STATE_ADVERTISED;
+			break;
+		case IPV6_RTRSTATE_STALE:
+			ha->ip_config.ipv6_default_router_state =
+						ISCSI_ROUTER_STATE_STALE;
+			break;
+		default:
+			ha->ip_config.ipv6_default_router_state =
+						ISCSI_ROUTER_STATE_UNKNOWN;
+		}
+
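+		/* Build the link-local address: fe80::/64 prefix plus
+		 * the interface ID supplied by the firmware. */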
+		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+		memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
+		       init_fw_cb->ipv6_if_id,
+		       min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
+			   sizeof(init_fw_cb->ipv6_if_id)));
+		memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
+		       min(sizeof(ha->ip_config.ipv6_addr0),
+			   sizeof(init_fw_cb->ipv6_addr0)));
+		memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
+		       min(sizeof(ha->ip_config.ipv6_addr1),
+			   sizeof(init_fw_cb->ipv6_addr1)));
+		memcpy(&ha->ip_config.ipv6_default_router_addr,
+		       init_fw_cb->ipv6_dflt_rtr_addr,
+		       min(sizeof(ha->ip_config.ipv6_default_router_addr),
+			   sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+		ha->ip_config.ipv6_vlan_tag =
+				be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
+		ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
+		ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id;
+		ha->ip_config.ipv6_flow_lbl =
+				le16_to_cpu(init_fw_cb->ipv6_flow_lbl);
+		ha->ip_config.ipv6_traffic_class =
+				init_fw_cb->ipv6_traffic_class;
+		ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit;
+		ha->ip_config.ipv6_nd_reach_time =
+				le32_to_cpu(init_fw_cb->ipv6_nd_reach_time);
+		ha->ip_config.ipv6_nd_rexmit_timer =
+				le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer);
+		ha->ip_config.ipv6_nd_stale_timeout =
+				le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout);
+		ha->ip_config.ipv6_dup_addr_detect_count =
+					init_fw_cb->ipv6_dup_addr_detect_count;
+		ha->ip_config.ipv6_gw_advrt_mtu =
+				le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu);
+		ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;
+	}
+}
+
+uint8_t
+qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+			  uint32_t *mbox_cmd,
+			  uint32_t *mbox_sts,
+			  struct addr_ctrl_blk  *init_fw_cb,
+			  dma_addr_t init_fw_cb_dma)
+{
+	if (qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma)
+	    != QLA_SUCCESS) {
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+			      ha->host_no, __func__));
+		return QLA_ERROR;
+	}
+
+	DEBUG2(qla4xxx_dump_buffer(init_fw_cb, sizeof(struct addr_ctrl_blk)));
+
+	/* Save some info in adapter structure. */
+	ha->acb_version = init_fw_cb->acb_version;
+	ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
+	ha->heartbeat_interval = init_fw_cb->hb_interval;
+	memcpy(ha->name_string, init_fw_cb->iscsi_name,
+		min(sizeof(ha->name_string),
+		sizeof(init_fw_cb->iscsi_name)));
+	ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
+	/*memcpy(ha->alias, init_fw_cb->Alias,
+	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
+
+	qla4xxx_update_local_ip(ha, init_fw_cb);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_initialize_fw_cb - initializes firmware control block.
+ * @ha: Pointer to host adapter structure.
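+ *
+ * Reads the current init firmware control block from the adapter,
+ * fills in the driver's request/response queue addresses and required
+ * firmware options, writes the block back, and then refreshes the
+ * local copy kept in the adapter structure.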
+ **/
+int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
+{
+	struct addr_ctrl_blk *init_fw_cb;
+	dma_addr_t init_fw_cb_dma;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_ERROR;
+
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct addr_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
+	if (init_fw_cb == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
+			      ha->host_no, __func__));
+		goto exit_init_fw_cb_no_free;
+	}
+	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+
+	/* Get Initialize Firmware Control Block. */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
+	    QLA_SUCCESS) {
+		goto exit_init_fw_cb;
+	}
+
+	/* Fill in the request and response queue information. */
+	init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);
+	init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in);
+	init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+	init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+	init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+	init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+	init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+	init_fw_cb->compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+	init_fw_cb->shdwreg_addr_lo = cpu_to_le32(LSDW(ha->shadow_regs_dma));
+	init_fw_cb->shdwreg_addr_hi = cpu_to_le32(MSDW(ha->shadow_regs_dma));
+
+	/* Set up required options. */
+	init_fw_cb->fw_options |=
+		__constant_cpu_to_le16(FWOPT_SESSION_MODE |
+				       FWOPT_INITIATOR_MODE);
+
+	if (is_qla80XX(ha))
+		init_fw_cb->fw_options |=
+		    __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+
+	init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+
+	init_fw_cb->add_fw_options = 0;
+	init_fw_cb->add_fw_options |=
+			__constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+	init_fw_cb->add_fw_options |=
+			__constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
+
+	if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
+		!= QLA_SUCCESS) {
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: %s: Failed to set init_fw_ctrl_blk\n",
+			      ha->host_no, __func__));
+		goto exit_init_fw_cb;
+	}
+
+	if (qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0],
+		init_fw_cb, init_fw_cb_dma) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: Failed to update local ifcb\n",
+				ha->host_no, __func__));
+		goto exit_init_fw_cb;
+	}
+	status = QLA_SUCCESS;
+
+exit_init_fw_cb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+				init_fw_cb, init_fw_cb_dma);
+exit_init_fw_cb_no_free:
+	return status;
+}
+
+/**
+ * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
+{
+	struct addr_ctrl_blk *init_fw_cb;
+	dma_addr_t init_fw_cb_dma;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct addr_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
+	if (init_fw_cb == NULL) {
+		printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
+		       __func__);
+		return QLA_ERROR;
+	}
+
+	/* Get Initialize Firmware Control Block. */
+	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
+			      ha->host_no, __func__));
+		dma_free_coherent(&ha->pdev->dev,
+				  sizeof(struct addr_ctrl_blk),
+				  init_fw_cb, init_fw_cb_dma);
+		return QLA_ERROR;
+	}
+
+	/* Save IP Address. */
+	qla4xxx_update_local_ip(ha, init_fw_cb);
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+				init_fw_cb, init_fw_cb_dma);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_firmware_state - gets firmware state of HBA
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Get firmware version */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
+			      "status %04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	ha->firmware_state = mbox_sts[1];
+	ha->board_id = mbox_sts[2];
+	ha->addl_fw_state = mbox_sts[3];
+	DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
+		      ha->host_no, __func__, ha->firmware_state);)
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_firmware_status - retrieves firmware status
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Get firmware version */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
+			      "status %04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+		return QLA_ERROR;
+	}
+
+	/* High-water mark of IOCBs */
+	ha->iocb_hiwat = mbox_sts[2];
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: firmware IOCBs available = %d\n", __func__,
+			  ha->iocb_hiwat));
+
+	if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+		ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+
+	/* Ideally, we should not enter this code, as the # of firmware
+	 * IOCBs is hard-coded in the firmware. We set a default
+	 * iocb_hiwat here just in case */
+	if (ha->iocb_hiwat == 0) {
+		ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "%s: Setting IOCB's to = %d\n", __func__,
+				  ha->iocb_hiwat));
+	}
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @fw_ddb_entry: Pointer to firmware's device database entry structure
+ * @fw_ddb_entry_dma: DMA address of the ddb entry buffer
+ * @num_valid_ddb_entries: Pointer to number of valid ddb entries
+ * @next_ddb_index: Pointer to next valid device database index
+ * @fw_ddb_device_state: Pointer to device state
+ * @conn_err_detail: Pointer to connection error details
+ * @tcp_source_port_num: Pointer to TCP source port number
+ * @connection_id: Pointer to connection ID
+ **/
+int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
+			    uint16_t fw_ddb_index,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma,
+			    uint32_t *num_valid_ddb_entries,
+			    uint32_t *next_ddb_index,
+			    uint32_t *fw_ddb_device_state,
+			    uint32_t *conn_err_detail,
+			    uint16_t *tcp_source_port_num,
+			    uint16_t *connection_id)
+{
+	int status = QLA_ERROR;
+	uint16_t options;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	/* Make sure the device index is valid */
+	if (fw_ddb_index >= MAX_DDB_ENTRIES) {
+		DEBUG2(printk("scsi%ld: %s: ddb [%d] out of range.\n",
+			      ha->host_no, __func__, fw_ddb_index));
+		goto exit_get_fwddb;
+	}
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	if (fw_ddb_entry)
+		memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
+
+	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
+	mbox_cmd[1] = (uint32_t) fw_ddb_index;
+	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
+	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
+	mbox_cmd[4] = sizeof(struct dev_db_entry);
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
+	    QLA_ERROR) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
+			      " with status 0x%04X\n", ha->host_no, __func__,
+			      mbox_sts[0]));
+		goto exit_get_fwddb;
+	}
+	if (fw_ddb_index != mbox_sts[1]) {
+		DEBUG2(printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n",
+			      ha->host_no, __func__, fw_ddb_index,
+			      mbox_sts[1]));
+		goto exit_get_fwddb;
+	}
+	if (fw_ddb_entry) {
+		options = le16_to_cpu(fw_ddb_entry->options);
+		if (options & DDB_OPT_IPV6_DEVICE) {
+			ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
+				"Next %d State %04x ConnErr %08x %pI6 "
+				":%04d \"%s\"\n", __func__, fw_ddb_index,
+				mbox_sts[0], mbox_sts[2], mbox_sts[3],
+				mbox_sts[4], mbox_sts[5],
+				fw_ddb_entry->ip_addr,
+				le16_to_cpu(fw_ddb_entry->port),
+				fw_ddb_entry->iscsi_name);
+		} else {
+			ql4_printk(KERN_INFO, ha, "%s: DDB[%d] MB0 %04x Tot %d "
+				"Next %d State %04x ConnErr %08x %pI4 "
+				":%04d \"%s\"\n", __func__, fw_ddb_index,
+				mbox_sts[0], mbox_sts[2], mbox_sts[3],
+				mbox_sts[4], mbox_sts[5],
+				fw_ddb_entry->ip_addr,
+				le16_to_cpu(fw_ddb_entry->port),
+				fw_ddb_entry->iscsi_name);
+		}
+	}
+	if (num_valid_ddb_entries)
+		*num_valid_ddb_entries = mbox_sts[2];
+	if (next_ddb_index)
+		*next_ddb_index = mbox_sts[3];
+	if (fw_ddb_device_state)
+		*fw_ddb_device_state = mbox_sts[4];
+
+	/*
+	 * RA: This mailbox has been changed to pass connection error
+	 * details.  This is true for ISP4010 as of spec version E; it is
+	 * not clear exactly when it changed.  The time2wait comes from the
+	 * default_time2wait field of the DEV_DB_ENTRY struct, which we
+	 * refer to as minTime2Wait.
+	 */
+	if (conn_err_detail)
+		*conn_err_detail = mbox_sts[5];
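+	/* mbox_sts[6] packs the TCP source port in its upper 16 bits
+	 * and the connection ID in its low byte. */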
+	if (tcp_source_port_num)
+		*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
+	if (connection_id)
+		*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
+	status = QLA_SUCCESS;
+
+exit_get_fwddb:
+	return status;
+}
+
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
+	mbox_cmd[1] = fw_ddb_index;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
+			  __func__, status, mbox_sts[0], mbox_sts[1]));
+	return status;
+}
+
+/**
+ * qla4xxx_set_ddb_entry - sets a ddb entry.
+ * @ha: Pointer to host adapter structure.
+ * @fw_ddb_index: Firmware's device database index
+ * @fw_ddb_entry_dma: dma address of ddb entry
+ * @mbx_sts: mailbox 0 to be returned or NULL
+ *
+ * This routine initializes or updates the adapter's device database
+ * entry for the specified device.
+ **/
+int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
+			  dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	/* Do not wait for completion. The firmware will send us an
+	 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
+	mbox_cmd[1] = (uint32_t) fw_ddb_index;
+	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
+	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
+	mbox_cmd[4] = sizeof(struct dev_db_entry);
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (mbx_sts)
+		*mbx_sts = mbox_sts[0];
+	DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
+	    ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
+
+	return status;
+}
+
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry, int options)
+{
+	int status;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+	mbox_cmd[1] = ddb_entry->fw_ddb_index;
+	mbox_cmd[3] = options;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
+				  "failed sts %04X %04X", __func__,
+				  mbox_sts[0], mbox_sts[1]));
+		if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) &&
+		    (mbox_sts[1] == DDB_NOT_LOGGED_IN)) {
+			set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+		}
+	}
+
+	return status;
+}
+
+/**
+ * qla4xxx_get_crash_record - retrieves crash record.
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
+ **/
+void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct crash_record *crash_record = NULL;
+	dma_addr_t crash_record_dma = 0;
+	uint32_t crash_record_size = 0;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	/* Get size of crash record. */
+	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
+			      ha->host_no, __func__));
+		goto exit_get_crash_record;
+	}
+	crash_record_size = mbox_sts[4];
+	if (crash_record_size == 0) {
+		DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
+			      ha->host_no, __func__));
+		goto exit_get_crash_record;
+	}
+
+	/* Alloc Memory for Crash Record. */
+	crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
+					  &crash_record_dma, GFP_KERNEL);
+	if (crash_record == NULL)
+		goto exit_get_crash_record;
+
+	/* Get Crash Record. */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
+	mbox_cmd[2] = LSDW(crash_record_dma);
+	mbox_cmd[3] = MSDW(crash_record_dma);
+	mbox_cmd[4] = crash_record_size;
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS)
+		goto exit_get_crash_record;
+
+	/* Dump Crash Record. */
+
+exit_get_crash_record:
+	if (crash_record)
+		dma_free_coherent(&ha->pdev->dev, crash_record_size,
+				  crash_record, crash_record_dma);
+}
+
+/**
+ * qla4xxx_get_conn_event_log - retrieves connection event log
+ * @ha: Pointer to host adapter structure.
+ **/
+void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct conn_event_log_entry *event_log = NULL;
+	dma_addr_t event_log_dma = 0;
+	uint32_t event_log_size = 0;
+	uint32_t num_valid_entries;
+	uint32_t oldest_entry = 0;
+	uint32_t max_event_log_entries;
+	uint8_t i;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	/* Get size of connection event log. */
+	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS)
+		goto exit_get_event_log;
+
+	event_log_size = mbox_sts[4];
+	if (event_log_size == 0)
+		goto exit_get_event_log;
+
+	/* Allocate memory for the connection event log. */
+	event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
+				       &event_log_dma, GFP_KERNEL);
+	if (event_log == NULL)
+		goto exit_get_event_log;
+
+	/* Get connection event log. */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
+	mbox_cmd[2] = LSDW(event_log_dma);
+	mbox_cmd[3] = MSDW(event_log_dma);
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+	    QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
+			      "log!\n", ha->host_no, __func__));
+		goto exit_get_event_log;
+	}
+
+	/* Dump Event Log. */
+	num_valid_entries = mbox_sts[1];
+
+	max_event_log_entries = event_log_size /
+		sizeof(struct conn_event_log_entry);
+
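+	/* If more events were logged than the buffer can hold, the log
+	 * wrapped; the oldest surviving entry is at (count % capacity). */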
+	if (num_valid_entries > max_event_log_entries)
+		oldest_entry = num_valid_entries % max_event_log_entries;
+
+	DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
+		      ha->host_no, num_valid_entries));
+
+	if (ql4xextended_error_logging == 3) {
+		if (oldest_entry == 0) {
+			/* Circular buffer has not wrapped around */
+			for (i = 0; i < num_valid_entries; i++) {
+				qla4xxx_dump_buffer((uint8_t *)event_log +
+						    (i * sizeof(*event_log)),
+						    sizeof(*event_log));
+			}
+		} else {
+			/* Circular buffer has wrapped around -
+			 * display accordingly */
+			for (i = oldest_entry; i < max_event_log_entries; i++) {
+				qla4xxx_dump_buffer((uint8_t *)event_log +
+						    (i * sizeof(*event_log)),
+						    sizeof(*event_log));
+			}
+			for (i = 0; i < oldest_entry; i++) {
+				qla4xxx_dump_buffer((uint8_t *)event_log +
+						    (i * sizeof(*event_log)),
+						    sizeof(*event_log));
+			}
+		}
+	}
+
+exit_get_event_log:
+	if (event_log)
+		dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
+				  event_log_dma);
+}
+
+/**
+ * qla4xxx_abort_task - issues Abort Task
+ * @ha: Pointer to host adapter structure.
+ * @srb: Pointer to srb entry
+ *
+ * This routine issues an Abort Task request to the firmware for the
+ * command associated with @srb.  The caller must ensure that the srb
+ * pointer is valid before calling this routine.
+ **/
+int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct scsi_cmnd *cmd = srb->cmd;
+	int status = QLA_SUCCESS;
+	unsigned long flags = 0;
+	uint32_t index;
+
+	/*
+	 * Send abort task command to ISP, so that the ISP will return
+	 * request with ABORT status
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
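+	/* host_scribble holds the active-command array index assigned
+	 * when the command was queued to the firmware. */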
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	index = (unsigned long)(unsigned char *)cmd->host_scribble;
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Firmware already posted completion on response queue */
+	if (index == MAX_SRBS)
+		return status;
+
+	mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
+	mbox_cmd[1] = srb->ddb->fw_ddb_index;
+	mbox_cmd[2] = index;
+	/* Immediate Command Enable */
+	mbox_cmd[5] = 0x01;
+
+	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
+	    &mbox_sts[0]);
+	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
+		status = QLA_ERROR;
+
+		DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: "
+		    "mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
+		    ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
+		    mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
+	}
+
+	return status;
+}
+
+/**
+ * qla4xxx_reset_lun - issues LUN Reset
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ * @lun: lun number
+ *
+ * This routine performs a LUN RESET on the specified target/lun.
+ * The caller must ensure that the ddb_entry pointer is valid before
+ * calling this routine.
+ **/
+int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
+		      uint64_t lun)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	uint32_t scsi_lun[2];
+	int status = QLA_SUCCESS;
+
+	DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no,
+		      ddb_entry->fw_ddb_index, lun));
+
+	/*
+	 * Send lun reset command to ISP, so that the ISP will return all
+	 * outstanding requests with RESET status
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
+
+	mbox_cmd[0] = MBOX_CMD_LUN_RESET;
+	mbox_cmd[1] = ddb_entry->fw_ddb_index;
+	/* FW expects LUN bytes 0-3 in Incoming Mailbox 2
+	 * (LUN byte 0 is LSByte, byte 3 is MSByte) */
+	mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
+	/* FW expects LUN bytes 4-7 in Incoming Mailbox 3
+	 * (LUN byte 4 is LSByte, byte 7 is MSByte) */
+	mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
+	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */
+
+	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
+	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
+	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
+		status = QLA_ERROR;
+
+	return status;
+}
+
+/**
+ * qla4xxx_reset_target - issues target Reset
+ * @ha: Pointer to host adapter structure.
+ * @ddb_entry: Pointer to device database entry
+ *
+ * This routine performs a TARGET RESET on the specified target.
+ * The caller must ensure that the ddb_entry pointer is valid
+ * before calling this routine.
+ **/
+int qla4xxx_reset_target(struct scsi_qla_host *ha,
+			 struct ddb_entry *ddb_entry)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	DEBUG2(printk("scsi%ld:%d: target reset issued\n", ha->host_no,
+		      ddb_entry->fw_ddb_index));
+
+	/*
+	 * Send target reset command to ISP, so that the ISP will return all
+	 * outstanding requests with RESET status
+	 */
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_TARGET_WARM_RESET;
+	mbox_cmd[1] = ddb_entry->fw_ddb_index;
+	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */
+
+	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+				&mbox_sts[0]);
+	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
+	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
+		status = QLA_ERROR;
+
+	return status;
+}
+
+int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t len)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_READ_FLASH;
+	mbox_cmd[1] = LSDW(dma_addr);
+	mbox_cmd[2] = MSDW(dma_addr);
+	mbox_cmd[3] = offset;
+	mbox_cmd[4] = len;
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+				    &mbox_sts[0]) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
+		    "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
+		    __func__, mbox_sts[0], mbox_sts[1], offset, len));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_about_firmware - gets FW, iscsi draft and boot loader version
+ * @ha: Pointer to host adapter structure.
+ *
+ * Retrieves the FW version, iSCSI draft version & bootloader version of HBA.
+ * Mailboxes 2 & 3 may hold an address for data. Make sure that we write 0 to
+ * those mailboxes, if unused.
+ **/
+int qla4xxx_about_firmware(struct scsi_qla_host *ha)
+{
+	struct about_fw_info *about_fw = NULL;
+	dma_addr_t about_fw_dma;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_ERROR;
+
+	about_fw = dma_alloc_coherent(&ha->pdev->dev,
+				      sizeof(struct about_fw_info),
+				      &about_fw_dma, GFP_KERNEL);
+	if (!about_fw) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
+				  "for about_fw\n", __func__));
+		return status;
+	}
+
+	memset(about_fw, 0, sizeof(struct about_fw_info));
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
+	mbox_cmd[2] = LSDW(about_fw_dma);
+	mbox_cmd[3] = MSDW(about_fw_dma);
+	mbox_cmd[4] = sizeof(struct about_fw_info);
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+					 &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_ABOUT_FW "
+				  "failed w/ status %04X\n", __func__,
+				  mbox_sts[0]));
+		goto exit_about_fw;
+	}
+
+	/* Save version information. */
+	ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
+	ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
+	ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
+	ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
+	memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
+	       sizeof(about_fw->fw_build_date));
+	memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
+	       sizeof(about_fw->fw_build_time));
+	strcpy((char *)ha->fw_info.fw_build_user,
+	       skip_spaces((char *)about_fw->fw_build_user));
+	ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
+	ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+	ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+	ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
+	ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+	ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+	ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
+	strcpy((char *)ha->fw_info.extended_timestamp,
+	       skip_spaces((char *)about_fw->extended_timestamp));
+
+	ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
+	ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
+	status = QLA_SUCCESS;
+
+exit_about_fw:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct about_fw_info),
+			  about_fw, about_fw_dma);
+	return status;
+}
+
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+			    dma_addr_t dma_addr)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
+	mbox_cmd[1] = options;
+	mbox_cmd[2] = LSDW(dma_addr);
+	mbox_cmd[3] = MSDW(dma_addr);
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+				    &mbox_sts[0]) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
+		     ha->host_no, __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
+			  uint32_t *mbx_sts)
+{
+	int status;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
+	mbox_cmd[1] = ddb_index;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+				   __func__, mbox_sts[0]));
+	}
+
+	*mbx_sts = mbox_sts[0];
+	return status;
+}
+
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
+{
+	int status;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
+	mbox_cmd[1] = ddb_index;
+
+	status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+				   __func__, mbox_sts[0]));
+	}
+
+	return status;
+}
+
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t length, uint32_t options)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
+	mbox_cmd[1] = LSDW(dma_addr);
+	mbox_cmd[2] = MSDW(dma_addr);
+	mbox_cmd[3] = offset;
+	mbox_cmd[4] = length;
+	mbox_cmd[5] = options;
+
+	status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
+				  "failed w/ status %04X, mbx1 %04X\n",
+				  __func__, mbox_sts[0], mbox_sts[1]));
+	}
+	return status;
+}
+
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+	uint32_t dev_db_end_offset;
+	int status = QLA_ERROR;
+
+	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+	dev_db_end_offset = FLASH_OFFSET_DB_END;
+
+	if (dev_db_start_offset > dev_db_end_offset) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Invalid DDB index %d\n", __func__,
+				  ddb_index));
+		goto exit_bootdb_failed;
+	}
+
+	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
+			   ha->host_no, __func__);
+		goto exit_bootdb_failed;
+	}
+
+	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+		status = QLA_SUCCESS;
+
+exit_bootdb_failed:
+	return status;
+}
+
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+			     struct dev_db_entry *fw_ddb_entry,
+			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+	uint32_t dev_db_start_offset;
+	uint32_t dev_db_end_offset;
+	int status = QLA_ERROR;
+
+	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+	if (is_qla40XX(ha)) {
+		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+		dev_db_end_offset = FLASH_OFFSET_DB_END;
+	} else {
+		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+				      (ha->hw.flt_region_ddb << 2);
+		/* flt_ddb_size is DDB table size for both ports
+		 * so divide it by 2 to calculate the offset for second port
+		 */
+		if (ha->port_num == 1)
+			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+		dev_db_end_offset = dev_db_start_offset +
+				    (ha->hw.flt_ddb_size / 2);
+	}
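+	/*
+	 * Worked example with a hypothetical flt_ddb_size of 0x20000:
+	 * each port owns 0x10000 bytes of the DDB region, port 1 starts
+	 * half-way in, and the per-index offset below then selects the
+	 * entry within that port's half.
+	 */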
+
+	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+
+	if (dev_db_start_offset > dev_db_end_offset) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Invalid DDB index %d\n", __func__,
+				  ddb_index));
+		goto exit_fdb_failed;
+	}
+
+	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
+			   ha->host_no, __func__);
+		goto exit_fdb_failed;
+	}
+
+	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+		status = QLA_SUCCESS;
+
+exit_fdb_failed:
+	return status;
+}
+
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
+		     uint16_t idx)
+{
+	int ret = 0;
+	int rval = QLA_ERROR;
+	uint32_t offset = 0, chap_size;
+	struct ql4_chap_table *chap_table;
+	dma_addr_t chap_dma;
+
+	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+	if (chap_table == NULL)
+		return -ENOMEM;
+
+	chap_size = sizeof(struct ql4_chap_table);
+	memset(chap_table, 0, chap_size);
+
+	if (is_qla40XX(ha)) {
+		offset = FLASH_CHAP_OFFSET | (idx * chap_size);
+	} else {
+		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		/* flt_chap_size is CHAP table size for both ports
+		 * so divide it by 2 to calculate the offset for second port
+		 */
+		if (ha->port_num == 1)
+			offset += (ha->hw.flt_chap_size / 2);
+		offset += (idx * chap_size);
+	}
+
+	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+	if (rval != QLA_SUCCESS) {
+		ret = -EINVAL;
+		goto exit_get_chap;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+		__le16_to_cpu(chap_table->cookie)));
+
+	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+		goto exit_get_chap;
+	}
+
+	strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+	strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+
+exit_get_chap:
+	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+	return ret;
+}
+
+/**
+ * qla4xxx_set_chap - Make a chap entry at the given index
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to set
+ * @password: CHAP password to set
+ * @idx: CHAP index at which to make the entry
+ * @bidi: type of chap entry (chap_in or chap_out)
+ *
+ * Create chap entry at the given index with the information provided.
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+		     uint16_t idx, int bidi)
+{
+	int ret = 0;
+	int rval = QLA_ERROR;
+	uint32_t offset = 0;
+	struct ql4_chap_table *chap_table;
+	uint32_t chap_size = 0;
+	dma_addr_t chap_dma;
+
+	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+	if (chap_table == NULL) {
+		ret = -ENOMEM;
+		goto exit_set_chap;
+	}
+
+	memset(chap_table, 0, sizeof(struct ql4_chap_table));
+	if (bidi)
+		chap_table->flags |= BIT_6; /* peer */
+	else
+		chap_table->flags |= BIT_7; /* local */
+	chap_table->secret_len = strlen(password);
+	strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
+	strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
+	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
+
+	if (is_qla40XX(ha)) {
+		chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
+		offset = FLASH_CHAP_OFFSET;
+	} else { /* Single region contains CHAP info for both ports which is
+		  * divided into half for each port.
+		  */
+		chap_size = ha->hw.flt_chap_size / 2;
+		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		if (ha->port_num == 1)
+			offset += chap_size;
+	}
+
+	offset += (idx * sizeof(struct ql4_chap_table));
+	rval = qla4xxx_set_flash(ha, chap_dma, offset,
+				sizeof(struct ql4_chap_table),
+				FLASH_OPT_RMW_COMMIT);
+
+	if (rval == QLA_SUCCESS && ha->chap_list) {
+		/* Update ha chap_list cache */
+		memcpy((struct ql4_chap_table *)ha->chap_list + idx,
+		       chap_table, sizeof(struct ql4_chap_table));
+	}
+	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+	if (rval != QLA_SUCCESS)
+		ret = -EINVAL;
+
+exit_set_chap:
+	return ret;
+}
+
+
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+				  char *password, uint16_t chap_index)
+{
+	int rval = QLA_ERROR;
+	struct ql4_chap_table *chap_table = NULL;
+	int max_chap_entries;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+		rval = QLA_ERROR;
+		goto exit_uni_chap;
+	}
+
+	if (!username || !password) {
+		ql4_printk(KERN_ERR, ha, "Invalid username or password buffer\n");
+		rval = QLA_ERROR;
+		goto exit_uni_chap;
+	}
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (chap_index >= max_chap_entries) {
+		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+		rval = QLA_ERROR;
+		goto exit_uni_chap;
+	}
+
+	mutex_lock(&ha->chap_sem);
+	chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
+	if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+		rval = QLA_ERROR;
+		goto exit_unlock_uni_chap;
+	}
+
+	if (!(chap_table->flags & BIT_7)) {
+		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
+		rval = QLA_ERROR;
+		goto exit_unlock_uni_chap;
+	}
+
+	strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+	strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+
+	rval = QLA_SUCCESS;
+
+exit_unlock_uni_chap:
+	mutex_unlock(&ha->chap_sem);
+exit_uni_chap:
+	return rval;
+}
+
+/**
+ * qla4xxx_get_chap_index - Get chap index given username and secret
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to be searched
+ * @password: CHAP password to be searched
+ * @bidi: Is this a BIDI CHAP
+ * @chap_index: CHAP index to be returned
+ *
+ * Match the username and password in the chap_list, return the index if a
+ * match is found. If a match is not found then add the entry in FLASH and
+ * return the index at which entry is written in the FLASH.
+ **/
+int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
+			   char *password, int bidi, uint16_t *chap_index)
+{
+	int i, rval;
+	int free_index = -1;
+	int found_index = 0;
+	int max_chap_entries = 0;
+	struct ql4_chap_table *chap_table;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+						sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+		return QLA_ERROR;
+	}
+
+	if (!username || !password) {
+		ql4_printk(KERN_ERR, ha, "Do not have username and password\n");
+		return QLA_ERROR;
+	}
+
+	mutex_lock(&ha->chap_sem);
+	for (i = 0; i < max_chap_entries; i++) {
+		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+		if (chap_table->cookie !=
+		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+			if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
+				free_index = i;
+			continue;
+		}
+		if (bidi) {
+			if (chap_table->flags & BIT_7)
+				continue;
+		} else {
+			if (chap_table->flags & BIT_6)
+				continue;
+		}
+		if (!strncmp(chap_table->secret, password,
+			     MAX_CHAP_SECRET_LEN) &&
+		    !strncmp(chap_table->name, username,
+			     MAX_CHAP_NAME_LEN)) {
+			*chap_index = i;
+			found_index = 1;
+			break;
+		}
+	}
+
+	/* If chap entry is not present and a free index is available then
+	 * write the entry in flash
+	 */
+	if (!found_index && free_index != -1) {
+		rval = qla4xxx_set_chap(ha, username, password,
+					free_index, bidi);
+		if (!rval) {
+			*chap_index = free_index;
+			found_index = 1;
+		}
+	}
+
+	mutex_unlock(&ha->chap_sem);
+
+	if (found_index)
+		return QLA_SUCCESS;
+	return QLA_ERROR;
+}
+
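+/*
+ * Minimal caller sketch for qla4xxx_get_chap_index() above (hypothetical
+ * credentials; mirrors the use in qla4xxx_set_param_ddbentry() below):
+ *
+ *	uint16_t idx;
+ *
+ *	if (qla4xxx_get_chap_index(ha, "chapuser", "chapsecret012",
+ *				   LOCAL_CHAP, &idx) == QLA_SUCCESS)
+ *		fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
+ */
+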
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+				   uint16_t fw_ddb_index,
+				   uint16_t connection_id,
+				   uint16_t option)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
+	mbox_cmd[1] = fw_ddb_index;
+	mbox_cmd[2] = connection_id;
+	mbox_cmd[3] = option;
+
+	status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
+				  "option %04x failed w/ status %04X %04X\n",
+				  __func__, option, mbox_sts[0], mbox_sts[1]));
+	}
+	return status;
+}
+
+/**
+ * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
+ * @ha: Pointer to host adapter structure.
+ * @ext_tmo: idc timeout value
+ *
+ * Requests firmware to extend the idc timeout value.
+ **/
+static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	ext_tmo &= 0xf;
+
+	mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
+	mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
+		       (ext_tmo << 8));		/* new timeout */
+	mbox_cmd[2] = ha->idc_info.info1;
+	mbox_cmd[3] = ha->idc_info.info2;
+	mbox_cmd[4] = ha->idc_info.info3;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+					 mbox_cmd, mbox_sts);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "scsi%ld: %s: failed status %04X\n",
+				  ha->host_no, __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+
+	ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
+		   __func__, ext_tmo);
+
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_disable_acb(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;
+
+	status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
+				  "failed w/ status %04X %04X %04X", __func__,
+				  mbox_sts[0], mbox_sts[1], mbox_sts[2]));
+	} else {
+		if (is_qla8042(ha) &&
+		    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&
+		    (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
+			/*
+			 * Disable ACB mailbox command takes time to complete
+			 * based on the total number of targets connected.
+			 * For 512 targets, it took approximately 5 secs to
+			 * complete. Setting the timeout value to 8, with the 3
+			 * secs buffer.
+			 */
+			qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
+			if (!wait_for_completion_timeout(&ha->disable_acb_comp,
+							 IDC_EXTEND_TOV * HZ)) {
+				ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
+					   __func__);
+			}
+		}
+	}
+	return status;
+}
+
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+		    uint32_t acb_type, uint32_t len)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_ACB;
+	mbox_cmd[1] = acb_type;
+	mbox_cmd[2] = LSDW(acb_dma);
+	mbox_cmd[3] = MSDW(acb_dma);
+	mbox_cmd[4] = len;
+
+	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
+				  "failed w/ status %04X\n", __func__,
+				  mbox_sts[0]));
+	}
+	return status;
+}
+
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+		    uint32_t *mbox_sts, dma_addr_t acb_dma)
+{
+	int status = QLA_SUCCESS;
+
+	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+	mbox_cmd[0] = MBOX_CMD_SET_ACB;
+	mbox_cmd[1] = 0; /* Primary ACB */
+	mbox_cmd[2] = LSDW(acb_dma);
+	mbox_cmd[3] = MSDW(acb_dma);
+	mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
+
+	status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,  "%s: MBOX_CMD_SET_ACB "
+				  "failed w/ status %04X\n", __func__,
+				  mbox_sts[0]));
+	}
+	return status;
+}
+
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry,
+			       struct iscsi_cls_conn *cls_conn,
+			       uint32_t *mbx_sts)
+{
+	struct dev_db_entry *fw_ddb_entry;
+	struct iscsi_conn *conn;
+	struct iscsi_session *sess;
+	struct qla_conn *qla_conn;
+	struct sockaddr *dst_addr;
+	dma_addr_t fw_ddb_entry_dma;
+	int status = QLA_SUCCESS;
+	int rval = 0;
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+	char *ip;
+	uint16_t iscsi_opts = 0;
+	uint32_t options = 0;
+	uint16_t idx, *ptid;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer.\n",
+				  __func__));
+		rval = -ENOMEM;
+		goto exit_set_param_no_free;
+	}
+
+	conn = cls_conn->dd_data;
+	qla_conn = conn->dd_data;
+	sess = conn->session;
+	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
+
+	if (dst_addr->sa_family == AF_INET6)
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (status == QLA_ERROR) {
+		rval = -EINVAL;
+		goto exit_set_param;
+	}
+
+	ptid = (uint16_t *)&fw_ddb_entry->isid[1];
+	*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
+
+	iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
+	memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
+
+	memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));
+
+	if (sess->targetname != NULL) {
+		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+		       min(strlen(sess->targetname),
+		       sizeof(fw_ddb_entry->iscsi_name)));
+	}
+
+	memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
+	memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));
+
+	fw_ddb_entry->options =  DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;
+
+	if (dst_addr->sa_family == AF_INET) {
+		addr = (struct sockaddr_in *)dst_addr;
+		ip = (char *)&addr->sin_addr;
+		memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
+		fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Destination Address [%pI4]: index [%d]\n",
+				   __func__, fw_ddb_entry->ip_addr,
+				  ddb_entry->fw_ddb_index));
+	} else if (dst_addr->sa_family == AF_INET6) {
+		addr6 = (struct sockaddr_in6 *)dst_addr;
+		ip = (char *)&addr6->sin6_addr;
+		memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
+		fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
+		fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Destination Address [%pI6]: index [%d]\n",
+				   __func__, fw_ddb_entry->ip_addr,
+				  ddb_entry->fw_ddb_index));
+	} else {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Failed to get IP Address\n",
+			   __func__);
+		rval = -EINVAL;
+		goto exit_set_param;
+	}
+
+	/* CHAP */
+	if (sess->username != NULL && sess->password != NULL) {
+		if (strlen(sess->username) && strlen(sess->password)) {
+			iscsi_opts |= BIT_7;
+
+			rval = qla4xxx_get_chap_index(ha, sess->username,
+						sess->password,
+						LOCAL_CHAP, &idx);
+			if (rval)
+				goto exit_set_param;
+
+			fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
+		}
+	}
+
+	if (sess->username_in != NULL && sess->password_in != NULL) {
+		/* Check if BIDI CHAP */
+		if (strlen(sess->username_in) && strlen(sess->password_in)) {
+			iscsi_opts |= BIT_4;
+
+			rval = qla4xxx_get_chap_index(ha, sess->username_in,
+						      sess->password_in,
+						      BIDI_CHAP, &idx);
+			if (rval)
+				goto exit_set_param;
+		}
+	}
+
+	if (sess->initial_r2t_en)
+		iscsi_opts |= BIT_10;
+
+	if (sess->imm_data_en)
+		iscsi_opts |= BIT_11;
+
+	fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);
+
+	if (conn->max_recv_dlength)
+		fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+			cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
+
+	if (sess->max_r2t)
+		fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
+
+	if (sess->first_burst)
+		fw_ddb_entry->iscsi_first_burst_len =
+			cpu_to_le16(sess->first_burst / BYTE_UNITS);
+
+	if (sess->max_burst)
+		fw_ddb_entry->iscsi_max_burst_len =
+			cpu_to_le16(sess->max_burst / BYTE_UNITS);
+
+	if (sess->time2wait)
+		fw_ddb_entry->iscsi_def_time2wait =
+			cpu_to_le16(sess->time2wait);
+
+	if (sess->time2retain)
+		fw_ddb_entry->iscsi_def_time2retain =
+			cpu_to_le16(sess->time2retain);
+
+	status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+				       fw_ddb_entry_dma, mbx_sts);
+
+	if (status != QLA_SUCCESS)
+		rval = -EINVAL;
+exit_set_param:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+			  fw_ddb_entry, fw_ddb_entry_dma);
+exit_set_param_no_free:
+	return rval;
+}
+
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+			  uint16_t stats_size, dma_addr_t stats_dma)
+{
+	int status = QLA_SUCCESS;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
+	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+	mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
+	mbox_cmd[1] = fw_ddb_index;
+	mbox_cmd[2] = LSDW(stats_dma);
+	mbox_cmd[3] = MSDW(stats_dma);
+	mbox_cmd[4] = stats_size;
+
+	status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
+				  "failed w/ status %04X\n", __func__,
+				  mbox_sts[0]));
+	}
+	return status;
+}
+
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+			 uint32_t ip_idx, uint32_t *sts)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status = QLA_SUCCESS;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
+	mbox_cmd[1] = acb_idx;
+	mbox_cmd[2] = ip_idx;
+
+	status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,  "%s: "
+				  "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
+				  "status %04X\n", __func__, mbox_sts[0]));
+	}
+	memcpy(sts, mbox_sts, sizeof(mbox_sts));
+	return status;
+}
+
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size)
+{
+	int status = QLA_SUCCESS;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
+	mbox_cmd[1] = LSDW(nvram_dma);
+	mbox_cmd[2] = MSDW(nvram_dma);
+	mbox_cmd[3] = offset;
+	mbox_cmd[4] = size;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+				  "status %04X\n", ha->host_no, __func__,
+				  mbox_sts[0]));
+	}
+	return status;
+}
+
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size)
+{
+	int status = QLA_SUCCESS;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
+	mbox_cmd[1] = LSDW(nvram_dma);
+	mbox_cmd[2] = MSDW(nvram_dma);
+	mbox_cmd[3] = offset;
+	mbox_cmd[4] = size;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+				  "status %04X\n", ha->host_no, __func__,
+				  mbox_sts[0]));
+	}
+	return status;
+}
+
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+				     uint32_t region, uint32_t field0,
+				     uint32_t field1)
+{
+	int status = QLA_SUCCESS;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
+	mbox_cmd[3] = region;
+	mbox_cmd[4] = field0;
+	mbox_cmd[5] = field1;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
+					 &mbox_sts[0]);
+	if (status != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+				  "status %04X\n", ha->host_no, __func__,
+				  mbox_sts[0]));
+	}
+	return status;
+}
+
+/**
+ * qla4_8xxx_set_param - set driver version in firmware.
+ * @ha: Pointer to host adapter structure.
+ * @param: Parameter to set, i.e., the driver version
+ **/
+int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	uint32_t status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_SET_PARAM;
+	if (param == SET_DRVR_VERSION) {
+		mbox_cmd[1] = SET_DRVR_VERSION;
+		strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
+			MAX_DRVR_VER_LEN - 1);
+	} else {
+		ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
+			   __func__, param);
+		status = QLA_ERROR;
+		goto exit_set_param;
+	}
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
+					 mbox_sts);
+	if (status == QLA_ERROR)
+		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
+			   __func__, mbox_sts[0]);
+
+exit_set_param:
+	return status;
+}
+
+/**
+ * qla4_83xx_post_idc_ack - post IDC ACK
+ * @ha: Pointer to host adapter structure.
+ *
+ * Posts IDC ACK for IDC Request Notification AEN.
+ **/
+int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_IDC_ACK;
+	mbox_cmd[1] = ha->idc_info.request_desc;
+	mbox_cmd[2] = ha->idc_info.info1;
+	mbox_cmd[3] = ha->idc_info.info2;
+	mbox_cmd[4] = ha->idc_info.info3;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+					 mbox_cmd, mbox_sts);
+	if (status == QLA_ERROR)
+		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+			   mbox_sts[0]);
+	else
+		ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);
+
+	return status;
+}
+
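+/*
+ * qla4_84xx_config_acb() below pairs two operations (a sketch of the
+ * intended flow, not a mandated call sequence): ACB_CONFIG_DISABLE
+ * snapshots the live ACB into ha->saved_acb and disables it, and a
+ * later ACB_CONFIG_SET replays that snapshot via SET_ACB and frees
+ * the saved copy.
+ */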
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct addr_ctrl_blk *acb = NULL;
+	uint32_t acb_len = sizeof(struct addr_ctrl_blk);
+	int rval = QLA_SUCCESS;
+	dma_addr_t acb_dma;
+
+	acb = dma_alloc_coherent(&ha->pdev->dev,
+				 sizeof(struct addr_ctrl_blk),
+				 &acb_dma, GFP_KERNEL);
+	if (!acb) {
+		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
+		rval = QLA_ERROR;
+		goto exit_config_acb;
+	}
+	memset(acb, 0, acb_len);
+
+	switch (acb_config) {
+	case ACB_CONFIG_DISABLE:
+		rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
+		if (rval != QLA_SUCCESS)
+			goto exit_free_acb;
+
+		rval = qla4xxx_disable_acb(ha);
+		if (rval != QLA_SUCCESS)
+			goto exit_free_acb;
+
+		if (!ha->saved_acb)
+			ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);
+
+		if (!ha->saved_acb) {
+			ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+				   __func__);
+			rval = QLA_ERROR;
+			goto exit_free_acb;
+		}
+		memcpy(ha->saved_acb, acb, acb_len);
+		break;
+	case ACB_CONFIG_SET:
+
+		if (!ha->saved_acb) {
+			ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
+				   __func__);
+			rval = QLA_ERROR;
+			goto exit_free_acb;
+		}
+
+		memcpy(acb, ha->saved_acb, acb_len);
+
+		rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+		if (rval != QLA_SUCCESS)
+			goto exit_free_acb;
+
+		break;
+	default:
+		ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
+			   __func__);
+	}
+
+exit_free_acb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
+			  acb_dma);
+exit_config_acb:
+	if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
+		kfree(ha->saved_acb);
+		ha->saved_acb = NULL;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s %s\n", __func__,
+			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+	return rval;
+}
+
+int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+					 mbox_cmd, mbox_sts);
+	if (status == QLA_SUCCESS)
+		*config = mbox_sts[1];
+	else
+		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+			   mbox_sts[0]);
+
+	return status;
+}
+
+int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	int status;
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG;
+	mbox_cmd[1] = *config;
+
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+				mbox_cmd, mbox_sts);
+	if (status != QLA_SUCCESS)
+		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
+			   mbox_sts[0]);
+
+	return status;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nvram.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nvram.c
new file mode 100644
index 0000000..3bf418f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -0,0 +1,256 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
+static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
+{
+	writel(cmd, isp_nvram(ha));
+	readl(isp_nvram(ha));
+	udelay(1);
+}
+
+static inline int eeprom_size(struct scsi_qla_host *ha)
+{
+	return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
+}
+
+static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
+{
+	return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
+		FM93C86A_NO_ADDR_BITS_16;
+}
+
+static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
+{
+	return FM93C56A_DATA_BITS_16;
+}
+
+static int fm93c56a_select(struct scsi_qla_host *ha)
+{
+	DEBUG5(printk(KERN_DEBUG "fm93c56a_select:\n"));
+
+	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
+	eeprom_cmd(ha->eeprom_cmd_data, ha);
+	return 1;
+}
+
+static int fm93c56a_cmd(struct scsi_qla_host *ha, int cmd, int addr)
+{
+	int i;
+	int mask;
+	int dataBit;
+	int previousBit;
+
+	/* Clock in a zero, then do the start bit. */
+	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
+
+	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+	       AUBURN_EEPROM_CLK_RISE, ha);
+	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+	       AUBURN_EEPROM_CLK_FALL, ha);
+
+	mask = 1 << (FM93C56A_CMD_BITS - 1);
+
+	/* Force the previous data bit to be different. */
+	previousBit = 0xffff;
+	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+		dataBit =
+			(cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+		if (previousBit != dataBit) {
+
+			/*
+			 * If the bit changed, then change the DO state to
+			 * match.
+			 */
+			eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
+			previousBit = dataBit;
+		}
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_RISE, ha);
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_FALL, ha);
+
+		cmd = cmd << 1;
+	}
+	mask = 1 << (eeprom_no_addr_bits(ha) - 1);
+
+	/* Force the previous data bit to be different. */
+	previousBit = 0xffff;
+	for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
+		dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
+			AUBURN_EEPROM_DO_0;
+		if (previousBit != dataBit) {
+			/*
+			 * If the bit changed, then change the DO state to
+			 * match.
+			 */
+			eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
+
+			previousBit = dataBit;
+		}
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_RISE, ha);
+		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+		       AUBURN_EEPROM_CLK_FALL, ha);
+
+		addr = addr << 1;
+	}
+	return 1;
+}
+
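+/*
+ * Sequence sketch for fm93c56a_cmd() above (derived from the code, not
+ * quoted from a datasheet): a read of address 0x12 on the 4022 part
+ * clocks out the start bit, the two command bits of FM93C56A_READ
+ * (10b), then 10 address bits MSB first, pulsing CLK_RISE/CLK_FALL for
+ * every bit and changing DO only when the bit value differs from the
+ * previous one.
+ */
+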
+static int fm93c56a_deselect(struct scsi_qla_host *ha)
+{
+	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
+	eeprom_cmd(ha->eeprom_cmd_data, ha);
+	return 1;
+}
+
+static int fm93c56a_datain(struct scsi_qla_host *ha, unsigned short *value)
+{
+	int i;
+	int data = 0;
+	int dataBit;
+
+	/*
+	 * Read the data bits.
+	 * The first bit is a dummy; clock right over it.
+	 */
+	for (i = 0; i < eeprom_no_data_bits(ha); i++) {
+		eeprom_cmd(ha->eeprom_cmd_data |
+		       AUBURN_EEPROM_CLK_RISE, ha);
+		eeprom_cmd(ha->eeprom_cmd_data |
+		       AUBURN_EEPROM_CLK_FALL, ha);
+
+		dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+
+		data = (data << 1) | dataBit;
+	}
+
+	*value = data;
+	return 1;
+}
+
+static int eeprom_readword(int eepromAddr, u16 *value,
+			   struct scsi_qla_host *ha)
+{
+	fm93c56a_select(ha);
+	fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
+	fm93c56a_datain(ha, value);
+	fm93c56a_deselect(ha);
+	return 1;
+}
+
+/* Hardware_lock must be set before calling */
+u16 rd_nvram_word(struct scsi_qla_host *ha, int offset)
+{
+	u16 val = 0;
+
+	/* NOTE: NVRAM uses half-word addresses */
+	eeprom_readword(offset, &val, ha);
+	return val;
+}
+
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset)
+{
+	u16 val = 0;
+	u8 rval = 0;
+	int index = 0;
+
+	if (offset & 0x1)
+		index = (offset - 1) / 2;
+	else
+		index = offset / 2;
+
+	val = le16_to_cpu(rd_nvram_word(ha, index));
+
+	if (offset & 0x1)
+		rval = (u8)((val & 0xff00) >> 8);
+	else
+		rval = (u8)((val & 0x00ff));
+
+	return rval;
+}
+
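+/*
+ * Worked example for rd_nvram_byte() above: byte offset 7 maps to
+ * half-word index 3; the odd offset selects the upper half of the
+ * little-endian word, i.e. rval = (val & 0xff00) >> 8.
+ */
+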
+int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha)
+{
+	int status = QLA_ERROR;
+	uint16_t checksum = 0;
+	uint32_t index;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (index = 0; index < eeprom_size(ha); index++)
+		checksum += rd_nvram_word(ha, index);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (checksum == 0)
+		status = QLA_SUCCESS;
+
+	return status;
+}
+
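+/*
+ * Note on the checksum test above: the 16-bit sum wraps, and the
+ * checksum word programmed into NVRAM is chosen so that the sum of
+ * every word is zero. Illustrative numbers: if the other words sum
+ * to 0x1234, a stored checksum of 0xedcc wraps the total to 0x0000.
+ */
+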
+/*************************************************************************
+ *
+ *			Hardware Semaphore routines
+ *
+ *************************************************************************/
+int ql4xxx_sem_spinlock(struct scsi_qla_host *ha, u32 sem_mask, u32 sem_bits)
+{
+	uint32_t value;
+	unsigned long flags;
+	unsigned int seconds = 30;
+
+	DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
+		      "0x%x\n", ha->host_no, sem_mask, sem_bits));
+	do {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel((sem_mask | sem_bits), isp_semaphore(ha));
+		value = readw(isp_semaphore(ha));
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		if ((value & (sem_mask >> 16)) == sem_bits) {
+			DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
+				      "code = 0x%x\n", ha->host_no,
+				      sem_mask, sem_bits));
+			return QLA_SUCCESS;
+		}
+		ssleep(1);
+	} while (--seconds);
+	return QLA_ERROR;
+}
+
+void ql4xxx_sem_unlock(struct scsi_qla_host *ha, u32 sem_mask)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel(sem_mask, isp_semaphore(ha));
+	readl(isp_semaphore(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
+		      sem_mask));
+}
+
+int ql4xxx_sem_lock(struct scsi_qla_host *ha, u32 sem_mask, u32 sem_bits)
+{
+	uint32_t value;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	writel((sem_mask | sem_bits), isp_semaphore(ha));
+	value = readw(isp_semaphore(ha));
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if ((value & (sem_mask >> 16)) == sem_bits) {
+		DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
+			      "0x%x, sema code=0x%x\n", ha->host_no,
+			      sem_mask, sem_bits, value));
+		return 1;
+	}
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nvram.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nvram.h
new file mode 100644
index 0000000..e97d79f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -0,0 +1,254 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#ifndef _QL4XNVRM_H_
+#define _QL4XNVRM_H_
+
+/**
+ * FM93C56A/66A/86A serial EEPROM definitions
+ **/
+#define FM93C56A_SIZE_8	 0x100
+#define FM93C56A_SIZE_16 0x80
+#define FM93C66A_SIZE_8	 0x200
+#define FM93C66A_SIZE_16 0x100	/* 4010 */
+#define FM93C86A_SIZE_16 0x400	/* 4022 */
+
+#define	 FM93C56A_START	      0x1
+
+/* Commands */
+#define	 FM93C56A_READ	      0x2
+#define	 FM93C56A_WEN	      0x0
+#define	 FM93C56A_WRITE	      0x1
+#define	 FM93C56A_WRITE_ALL   0x0
+#define	 FM93C56A_WDS	      0x0
+#define	 FM93C56A_ERASE	      0x3
+#define	 FM93C56A_ERASE_ALL   0x0
+
+/* Command Extensions */
+#define	 FM93C56A_WEN_EXT	 0x3
+#define	 FM93C56A_WRITE_ALL_EXT	 0x1
+#define	 FM93C56A_WDS_EXT	 0x0
+#define	 FM93C56A_ERASE_ALL_EXT	 0x2
+
+/* Address Bits */
+#define	 FM93C56A_NO_ADDR_BITS_16   8	/* 4010 */
+#define	 FM93C56A_NO_ADDR_BITS_8    9	/* 4010 */
+#define	 FM93C86A_NO_ADDR_BITS_16   10	/* 4022 */
+
+/* Data Bits */
+#define	 FM93C56A_DATA_BITS_16	 16
+#define	 FM93C56A_DATA_BITS_8	 8
+
+/* Special Bits */
+#define	 FM93C56A_READ_DUMMY_BITS   1
+#define	 FM93C56A_READY		    0
+#define	 FM93C56A_BUSY		    1
+#define	 FM93C56A_CMD_BITS	    2
+
+/* Auburn Bits */
+#define	 AUBURN_EEPROM_DI	    0x8
+#define	 AUBURN_EEPROM_DI_0	    0x0
+#define	 AUBURN_EEPROM_DI_1	    0x8
+#define	 AUBURN_EEPROM_DO	    0x4
+#define	 AUBURN_EEPROM_DO_0	    0x0
+#define	 AUBURN_EEPROM_DO_1	    0x4
+#define	 AUBURN_EEPROM_CS	    0x2
+#define	 AUBURN_EEPROM_CS_0	    0x0
+#define	 AUBURN_EEPROM_CS_1	    0x2
+#define	 AUBURN_EEPROM_CLK_RISE	    0x1
+#define	 AUBURN_EEPROM_CLK_FALL	    0x0
+
+/*
+ * EEPROM format
+ */
+struct bios_params {
+	uint16_t SpinUpDelay:1;
+	uint16_t BIOSDisable:1;
+	uint16_t MMAPEnable:1;
+	uint16_t BootEnable:1;
+	uint16_t Reserved0:12;
+	uint8_t bootID0:7;
+	uint8_t bootID0Valid:1;
+	uint8_t bootLUN0[8];
+	uint8_t bootID1:7;
+	uint8_t bootID1Valid:1;
+	uint8_t bootLUN1[8];
+	uint16_t MaxLunsPerTarget;
+	uint8_t Reserved1[10];
+};
+
+struct eeprom_port_cfg {
+
+	/* MTU MAC 0 */
+	u16 etherMtu_mac;
+
+	/* Flow Control MAC 0 */
+	u16 pauseThreshold_mac;
+	u16 resumeThreshold_mac;
+	u16 reserved[13];
+};
+
+struct eeprom_function_cfg {
+	u8 reserved[30];
+
+	/* MAC ADDR */
+	u8 macAddress[6];
+	u8 macAddressSecondary[6];
+	u16 subsysVendorId;
+	u16 subsysDeviceId;
+};
+
+struct eeprom_data {
+	union {
+		struct {	/* isp4010 */
+			u8 asic_id[4]; /* x00 */
+			u8 version;	/* x04 */
+			u8 reserved;	/* x05 */
+			u16 board_id;	/* x06 */
+#define	  EEPROM_BOARDID_ELDORADO    1
+#define	  EEPROM_BOARDID_PLACER	     2
+
+#define EEPROM_SERIAL_NUM_SIZE	     16
+			u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */
+
+			/* ExtHwConfig: */
+			/* Offset = 24bytes
+			 *
+			 * | SSRAM Size|     |ST|PD|SDRAM SZ| W| B| SP	|  |
+			 * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+			 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+			 */
+			u16 ext_hw_conf; /* x18 */
+			u8 mac0[6];	/* x1A */
+			u8 mac1[6];	/* x20 */
+			u8 mac2[6];	/* x26 */
+			u8 mac3[6];	/* x2C */
+			u16 etherMtu;	/* x32 */
+			u16 macConfig;	/* x34 */
+#define	 MAC_CONFIG_ENABLE_ANEG	    0x0001
+#define	 MAC_CONFIG_ENABLE_PAUSE    0x0002
+			u16 phyConfig;	/* x36 */
+#define	 PHY_CONFIG_PHY_ADDR_MASK	      0x1f
+#define	 PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
+			u16 reserved_56;	/* x38 */
+
+#define EEPROM_UNUSED_1_SIZE   2
+			u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
+			u16 bufletSize;	/* x3C */
+			u16 bufletCount;	/* x3E */
+			u16 bufletPauseThreshold; /* x40 */
+			u16 tcpWindowThreshold50; /* x42 */
+			u16 tcpWindowThreshold25; /* x44 */
+			u16 tcpWindowThreshold0; /* x46 */
+			u16 ipHashTableBaseHi;	/* x48 */
+			u16 ipHashTableBaseLo;	/* x4A */
+			u16 ipHashTableSize;	/* x4C */
+			u16 tcpHashTableBaseHi;	/* x4E */
+			u16 tcpHashTableBaseLo;	/* x50 */
+			u16 tcpHashTableSize;	/* x52 */
+			u16 ncbTableBaseHi;	/* x54 */
+			u16 ncbTableBaseLo;	/* x56 */
+			u16 ncbTableSize;	/* x58 */
+			u16 drbTableBaseHi;	/* x5A */
+			u16 drbTableBaseLo;	/* x5C */
+			u16 drbTableSize;	/* x5E */
+
+#define EEPROM_UNUSED_2_SIZE   4
+			u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */
+			u16 ipReassemblyTimeout; /* x64 */
+			u16 tcpMaxWindowSizeHi;	/* x66 */
+			u16 tcpMaxWindowSizeLo;	/* x68 */
+			u32 net_ip_addr0;	/* x6A Added for TOE
+						 * functionality. */
+			u32 net_ip_addr1;	/* x6E */
+			u32 scsi_ip_addr0;	/* x72 */
+			u32 scsi_ip_addr1;	/* x76 */
+#define EEPROM_UNUSED_3_SIZE   128	/* changed from 144 to account
+					 * for ip addresses */
+			u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */
+			u16 subsysVendorId_f0;	/* xFA */
+			u16 subsysDeviceId_f0;	/* xFC */
+
+			/* Address = 0x7F */
+#define FM93C56A_SIGNATURE  0x9356
+#define FM93C66A_SIGNATURE  0x9366
+			u16 signature;	/* xFE */
+
+#define EEPROM_UNUSED_4_SIZE   250
+			u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */
+			u16 subsysVendorId_f1;	/* x1FA */
+			u16 subsysDeviceId_f1;	/* x1FC */
+			u16 checksum;	/* x1FE */
+		} __attribute__ ((packed)) isp4010;
+		struct {	/* isp4022 */
+			u8 asicId[4];	/* x00 */
+			u8 version;	/* x04 */
+			u8 reserved_5;	/* x05 */
+			u16 boardId;	/* x06 */
+			u8 boardIdStr[16];	/* x08 */
+			u8 serialNumber[16];	/* x18 */
+
+			/* External Hardware Configuration */
+			u16 ext_hw_conf;	/* x28 */
+
+			/* MAC 0 CONFIGURATION */
+			struct eeprom_port_cfg macCfg_port0; /* x2A */
+
+			/* MAC 1 CONFIGURATION */
+			struct eeprom_port_cfg macCfg_port1; /* x4A */
+
+			/* DDR SDRAM Configuration */
+			u16 bufletSize;	/* x6A */
+			u16 bufletCount;	/* x6C */
+			u16 tcpWindowThreshold50; /* x6E */
+			u16 tcpWindowThreshold25; /* x70 */
+			u16 tcpWindowThreshold0; /* x72 */
+			u16 ipHashTableBaseHi;	/* x74 */
+			u16 ipHashTableBaseLo;	/* x76 */
+			u16 ipHashTableSize;	/* x78 */
+			u16 tcpHashTableBaseHi;	/* x7A */
+			u16 tcpHashTableBaseLo;	/* x7C */
+			u16 tcpHashTableSize;	/* x7E */
+			u16 ncbTableBaseHi;	/* x80 */
+			u16 ncbTableBaseLo;	/* x82 */
+			u16 ncbTableSize;	/* x84 */
+			u16 drbTableBaseHi;	/* x86 */
+			u16 drbTableBaseLo;	/* x88 */
+			u16 drbTableSize;	/* x8A */
+			u16 reserved_142[4];	/* x8C */
+
+			/* TCP/IP Parameters */
+			u16 ipReassemblyTimeout; /* x94 */
+			u16 tcpMaxWindowSize;	/* x96 */
+			u16 ipSecurity;	/* x98 */
+			u8 reserved_156[294]; /* x9A */
+			u16 qDebug[8];	/* QLOGIC USE ONLY   x1C0 */
+			struct eeprom_function_cfg funcCfg_fn0;	/* x1D0 */
+			u16 reserved_510; /* x1FE */
+
+			/* Address = 512 */
+			u8 oemSpace[432]; /* x200 */
+			struct bios_params sBIOSParams_fn1; /* x3B0 */
+			struct eeprom_function_cfg funcCfg_fn1;	/* x3D0 */
+			u16 reserved_1022; /* x3FE */
+
+			/* Address = 1024 */
+			u8 reserved_1024[464];	/* x400 */
+			struct eeprom_function_cfg funcCfg_fn2;	/* x5D0 */
+			u16 reserved_1534; /* x5FE */
+
+			/* Address = 1536 */
+			u8 reserved_1536[432];	/* x600 */
+			struct bios_params sBIOSParams_fn3; /* x7B0 */
+			struct eeprom_function_cfg funcCfg_fn3;	/* x7D0 */
+			u16 checksum;	/* x7FE */
+		} __attribute__ ((packed)) isp4022;
+	};
+};
+
+
+#endif	/* _QL4XNVRM_H_ */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nx.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nx.c
new file mode 100644
index 0000000..e91abb3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nx.c
@@ -0,0 +1,4230 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_inline.h"
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#define TIMEOUT_100_MS	100
+#define MASK(n)		DMA_BIT_MASK(n)
+#define MN_WIN(addr)	(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr)	(((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr)	(addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M	(0)
+#define QLA82XX_PCI_MS_2M	(0x80000)
+#define QLA82XX_PCI_OCM0_2M	(0xc0000)
+#define VALID_OCM_ADDR(addr)	(((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))
+
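+/*
+ * Illustrative arithmetic for the window macros above (hypothetical
+ * address): MN_WIN(0x0100000) keeps bits 18-24 shifted right once
+ * (0x0100000 & 0x1fc0000 = 0x0100000, >> 1 = 0x80000) and ORs in
+ * (addr >> 25) & 0x3ff, which is zero here, giving 0x80000.
+ */
+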
+/* CRB window related */
+#define CRB_BLK(off)	((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
+#define CRB_WINDOW_2M	(0x130060)
+#define CRB_HI(off)	((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+			((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
+#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
+#define CRB_INDIRECT_2M			(0x1e0000UL)
+
+static inline void __iomem *
+qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off)
+{
+	if ((off < ha->first_page_group_end) &&
+	    (off >= ha->first_page_group_start))
+		return (void __iomem *)(ha->nx_pcibase + off);
+
+	return NULL;
+}
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+static int qla4_8xxx_crb_table_initialized;
+
+#define qla4_8xxx_crb_addr_transform(name) \
+	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+	 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+static void
+qla4_82xx_crb_addr_transform_setup(void)
+{
+	qla4_8xxx_crb_addr_transform(XDMA);
+	qla4_8xxx_crb_addr_transform(TIMR);
+	qla4_8xxx_crb_addr_transform(SRE);
+	qla4_8xxx_crb_addr_transform(SQN3);
+	qla4_8xxx_crb_addr_transform(SQN2);
+	qla4_8xxx_crb_addr_transform(SQN1);
+	qla4_8xxx_crb_addr_transform(SQN0);
+	qla4_8xxx_crb_addr_transform(SQS3);
+	qla4_8xxx_crb_addr_transform(SQS2);
+	qla4_8xxx_crb_addr_transform(SQS1);
+	qla4_8xxx_crb_addr_transform(SQS0);
+	qla4_8xxx_crb_addr_transform(RPMX7);
+	qla4_8xxx_crb_addr_transform(RPMX6);
+	qla4_8xxx_crb_addr_transform(RPMX5);
+	qla4_8xxx_crb_addr_transform(RPMX4);
+	qla4_8xxx_crb_addr_transform(RPMX3);
+	qla4_8xxx_crb_addr_transform(RPMX2);
+	qla4_8xxx_crb_addr_transform(RPMX1);
+	qla4_8xxx_crb_addr_transform(RPMX0);
+	qla4_8xxx_crb_addr_transform(ROMUSB);
+	qla4_8xxx_crb_addr_transform(SN);
+	qla4_8xxx_crb_addr_transform(QMN);
+	qla4_8xxx_crb_addr_transform(QMS);
+	qla4_8xxx_crb_addr_transform(PGNI);
+	qla4_8xxx_crb_addr_transform(PGND);
+	qla4_8xxx_crb_addr_transform(PGN3);
+	qla4_8xxx_crb_addr_transform(PGN2);
+	qla4_8xxx_crb_addr_transform(PGN1);
+	qla4_8xxx_crb_addr_transform(PGN0);
+	qla4_8xxx_crb_addr_transform(PGSI);
+	qla4_8xxx_crb_addr_transform(PGSD);
+	qla4_8xxx_crb_addr_transform(PGS3);
+	qla4_8xxx_crb_addr_transform(PGS2);
+	qla4_8xxx_crb_addr_transform(PGS1);
+	qla4_8xxx_crb_addr_transform(PGS0);
+	qla4_8xxx_crb_addr_transform(PS);
+	qla4_8xxx_crb_addr_transform(PH);
+	qla4_8xxx_crb_addr_transform(NIU);
+	qla4_8xxx_crb_addr_transform(I2Q);
+	qla4_8xxx_crb_addr_transform(EG);
+	qla4_8xxx_crb_addr_transform(MN);
+	qla4_8xxx_crb_addr_transform(MS);
+	qla4_8xxx_crb_addr_transform(CAS2);
+	qla4_8xxx_crb_addr_transform(CAS1);
+	qla4_8xxx_crb_addr_transform(CAS0);
+	qla4_8xxx_crb_addr_transform(CAM);
+	qla4_8xxx_crb_addr_transform(C2C1);
+	qla4_8xxx_crb_addr_transform(C2C0);
+	qla4_8xxx_crb_addr_transform(SMB);
+	qla4_8xxx_crb_addr_transform(OCM0);
+	qla4_8xxx_crb_addr_transform(I2C0);
+
+	qla4_8xxx_crb_table_initialized = 1;
+}
+
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+	{{{0, 0,         0,         0} } },		/* 0: PCI */
+	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
+		{1, 0x0110000, 0x0120000, 0x130000},
+		{1, 0x0120000, 0x0122000, 0x124000},
+		{1, 0x0130000, 0x0132000, 0x126000},
+		{1, 0x0140000, 0x0142000, 0x128000},
+		{1, 0x0150000, 0x0152000, 0x12a000},
+		{1, 0x0160000, 0x0170000, 0x110000},
+		{1, 0x0170000, 0x0172000, 0x12e000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{1, 0x01e0000, 0x01e0800, 0x122000},
+		{0, 0x0000000, 0x0000000, 0x000000} } },
+	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+	{{{0, 0,         0,         0} } },	    /* 3: */
+	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
+	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
+	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
+	{{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{1, 0x08f0000, 0x08f2000, 0x172000} } },
+	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{1, 0x09f0000, 0x09f2000, 0x176000} } },
+	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{1, 0x0af0000, 0x0af2000, 0x17a000} } },
+	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+	{{{0, 0,         0,         0} } },	/* 23: */
+	{{{0, 0,         0,         0} } },	/* 24: */
+	{{{0, 0,         0,         0} } },	/* 25: */
+	{{{0, 0,         0,         0} } },	/* 26: */
+	{{{0, 0,         0,         0} } },	/* 27: */
+	{{{0, 0,         0,         0} } },	/* 28: */
+	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+	{{{0} } },				/* 32: PCI */
+	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
+		{1, 0x2110000, 0x2120000, 0x130000},
+		{1, 0x2120000, 0x2122000, 0x124000},
+		{1, 0x2130000, 0x2132000, 0x126000},
+		{1, 0x2140000, 0x2142000, 0x128000},
+		{1, 0x2150000, 0x2152000, 0x12a000},
+		{1, 0x2160000, 0x2170000, 0x110000},
+		{1, 0x2170000, 0x2172000, 0x12e000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000},
+		{0, 0x0000000, 0x0000000, 0x000000} } },
+	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+	{{{0} } },				/* 35: */
+	{{{0} } },				/* 36: */
+	{{{0} } },				/* 37: */
+	{{{0} } },				/* 38: */
+	{{{0} } },				/* 39: */
+	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+	{{{0} } },				/* 52: */
+	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+	{{{0} } },				/* 59: I2C0 */
+	{{{0} } },				/* 60: I2C1 */
+	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },/* 61: LPC */
+	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned qla4_82xx_crb_hub_agt[64] = {
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+	0,
+	0,
+	0,
+	0,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+	0,
+};
+
+/* Device states */
+static char *qdev_state[] = {
+	"Unknown",
+	"Cold",
+	"Initializing",
+	"Ready",
+	"Need Reset",
+	"Need Quiescent",
+	"Failed",
+	"Quiescent",
+};
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
+{
+	u32 win_read;
+
+	ha->crb_win = CRB_HI(*off);
+	writel(ha->crb_win,
+		(void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+	/* Read back value to make sure write has gone through before trying
+	 * to use it.
+	 */
+	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	if (win_read != ha->crb_win) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x), off=0x%lx\n",
+		    __func__, ha->crb_win, win_read, *off));
+	}
+	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
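+
+/*
+ * Worked example of the mapping above: CRB_HI(*off) supplies the window
+ * value latched into CRB_WINDOW_2M, and only the low 16 bits of the
+ * original offset survive, so the caller ends up dereferencing
+ * ha->nx_pcibase + CRB_INDIRECT_2M + (*off & 0xffff) while the window
+ * register steers the upper address bits.
+ */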
+
+void
+qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
+{
+	unsigned long flags = 0;
+	int rv;
+
+	rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
+
+	BUG_ON(rv == -1);
+
+	if (rv == 1) {
+		write_lock_irqsave(&ha->hw_lock, flags);
+		qla4_82xx_crb_win_lock(ha);
+		qla4_82xx_pci_set_crbwindow_2M(ha, &off);
+	}
+
+	writel(data, (void __iomem *)off);
+
+	if (rv == 1) {
+		qla4_82xx_crb_win_unlock(ha);
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+	}
+}
+
+uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
+{
+	unsigned long flags = 0;
+	int rv;
+	u32 data;
+
+	rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
+
+	BUG_ON(rv == -1);
+
+	if (rv == 1) {
+		write_lock_irqsave(&ha->hw_lock, flags);
+		qla4_82xx_crb_win_lock(ha);
+		qla4_82xx_pci_set_crbwindow_2M(ha, &off);
+	}
+	data = readl((void __iomem *)off);
+
+	if (rv == 1) {
+		qla4_82xx_crb_win_unlock(ha);
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+	}
+	return data;
+}
+
+/* Minidump related functions */
+int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
+{
+	uint32_t win_read, off_value;
+	int rval = QLA_SUCCESS;
+
+	off_value  = off & 0xFFFF0000;
+	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+	/*
+	 * Read back value to make sure write has gone through before trying
+	 * to use it.
+	 */
+	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	if (win_read != off_value) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+				  __func__, off_value, win_read, off));
+		rval = QLA_ERROR;
+	} else {
+		off_value  = off & 0x0000FFFF;
+		*data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+					       ha->nx_pcibase));
+	}
+	return rval;
+}
+
+int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
+{
+	uint32_t win_read, off_value;
+	int rval = QLA_SUCCESS;
+
+	off_value  = off & 0xFFFF0000;
+	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+	/* Read back value to make sure write has gone through before trying
+	 * to use it.
+	 */
+	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	if (win_read != off_value) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+				  __func__, off_value, win_read, off));
+		rval = QLA_ERROR;
+	} else {
+		off_value  = off & 0x0000FFFF;
+		writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+					      ha->nx_pcibase));
+	}
+	return rval;
+}
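+
+/*
+ * Both minidump accessors above use the same split: the upper 16 bits of
+ * 'off' select the window and the lower 16 bits index into it.  For
+ * example, off = 0x12345678 latches 0x12340000 into CRB_WINDOW_2M and
+ * then touches ha->nx_pcibase + CRB_INDIRECT_2M + 0x5678.
+ */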
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+
+int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
+{
+	int i;
+	int done = 0, timeout = 0;
+
+	while (!done) {
+		/* acquire semaphore3 from PCI HW block */
+		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+		if (done == 1)
+			break;
+		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+			return -1;
+
+		timeout++;
+
+		/* Yield CPU */
+		if (!in_interrupt())
+			schedule();
+		else {
+			for (i = 0; i < 20; i++)
+				cpu_relax();    /* This is a nop instr on i386 */
+		}
+	}
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
+	return 0;
+}
+
+void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
+{
+	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+}
+
+#define IDC_LOCK_TIMEOUT 100000000
+
+/**
+ * qla4_82xx_idc_lock - hw_lock
+ * @ha: pointer to adapter structure
+ *
+ * General purpose lock used to synchronize access to
+ * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
+ **/
+int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
+{
+	int i;
+	int done = 0, timeout = 0;
+
+	while (!done) {
+		/* acquire semaphore5 from PCI HW block */
+		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+		if (done == 1)
+			break;
+		if (timeout >= IDC_LOCK_TIMEOUT)
+			return -1;
+
+		timeout++;
+
+		/* Yield CPU */
+		if (!in_interrupt())
+			schedule();
+		else {
+			for (i = 0; i < 20; i++)
+				cpu_relax();    /* This is a nop instr on i386 */
+		}
+	}
+	return 0;
+}
+
+void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
+{
+	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+int
+qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
+{
+	struct crb_128M_2M_sub_block_map *m;
+
+	if (*off >= QLA82XX_CRB_MAX)
+		return -1;
+
+	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
+		*off = (*off - QLA82XX_PCI_CAMQM) +
+		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+		return 0;
+	}
+
+	if (*off < QLA82XX_PCI_CRBSPACE)
+		return -1;
+
+	*off -= QLA82XX_PCI_CRBSPACE;
+	/*
+	 * Try direct map
+	 */
+
+	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+
+	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
+		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+		return 0;
+	}
+
+	/*
+	 * Not in direct map, use crb window
+	 */
+	return 1;
+}
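+
+/*
+ * Return-value contract, as relied upon by qla4_82xx_rd_32()/wr_32()
+ * above:
+ *	 0 - *off has been rewritten to a directly mapped virtual address;
+ *	     the caller may readl()/writel() it with no further setup.
+ *	 1 - the offset is not direct-mapped; the caller must take hw_lock,
+ *	     grab the CRB window semaphore and program the window through
+ *	     qla4_82xx_pci_set_crbwindow_2M() first.  This is also why even
+ *	     the read path takes the write lock: the shared window register
+ *	     gets modified.
+ *	-1 - the offset lies outside the CRB space entirely.
+ */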
+
+/*
+* check memory access boundary.
+* used by test agent. support ddr access only for now
+*/
+static unsigned long
+qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
+		unsigned long long addr, int size)
+{
+	if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+	    QLA8XXX_ADDR_DDR_NET_MAX) ||
+	    !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
+	    QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
+	    ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
+		return 0;
+	}
+	return 1;
+}
+
+static int qla4_82xx_pci_set_window_warning_count;
+
+static unsigned long
+qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
+{
+	int window;
+	u32 win_read;
+
+	if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+	    QLA8XXX_ADDR_DDR_NET_MAX)) {
+		/* DDR network side */
+		window = MN_WIN(addr);
+		ha->ddr_mn_window = window;
+		qla4_82xx_wr_32(ha, ha->mn_win_crb |
+		    QLA82XX_PCI_CRBSPACE, window);
+		win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
+		    QLA82XX_PCI_CRBSPACE);
+		if ((win_read << 17) != window) {
+			ql4_printk(KERN_WARNING, ha,
+			"%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+			__func__, window, win_read);
+		}
+		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+				QLA8XXX_ADDR_OCM0_MAX)) {
+		unsigned int temp1;
+		/* if bits 19:11 are all set */
+		if ((addr & 0x00ff800) == 0xff800) {
+			printk(KERN_ERR "%s: QM access not handled.\n",
+			       __func__);
+			addr = -1UL;
+		}
+
+		window = OCM_WIN(addr);
+		ha->ddr_mn_window = window;
+		qla4_82xx_wr_32(ha, ha->mn_win_crb |
+		    QLA82XX_PCI_CRBSPACE, window);
+		win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
+		    QLA82XX_PCI_CRBSPACE);
+		temp1 = ((window & 0x1FF) << 7) |
+		    ((window & 0x0FFFE0000) >> 17);
+		if (win_read != temp1) {
+			printk("%s: Written OCMwin (0x%x) != Read"
+			    " OCMwin (0x%x)\n", __func__, temp1, win_read);
+		}
+		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+				QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+		/* QDR network side */
+		window = MS_WIN(addr);
+		ha->qdr_sn_window = window;
+		qla4_82xx_wr_32(ha, ha->ms_win_crb |
+		    QLA82XX_PCI_CRBSPACE, window);
+		win_read = qla4_82xx_rd_32(ha,
+		     ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+		if (win_read != window) {
+			printk("%s: Written MSwin (0x%x) != Read "
+			    "MSwin (0x%x)\n", __func__, window, win_read);
+		}
+		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+
+	} else {
+		/*
+		 * peg gdb frequently accesses memory that doesn't exist;
+		 * this limits the chatter so debugging isn't slowed down.
+		 */
+		if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
+		    (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
+			printk("%s: Warning:%s Unknown address range!\n",
+			    __func__, DRIVER_NAME);
+		}
+		addr = -1UL;
+	}
+	return addr;
+}
+
+/* check if address is in the same windows as the previous access */
+static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
+		unsigned long long addr)
+{
+	int window;
+	unsigned long long qdr_max;
+
+	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+	if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+	    QLA8XXX_ADDR_DDR_NET_MAX)) {
+		/* DDR network side */
+		BUG();	/* MN access can not come here */
+	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
+	     QLA8XXX_ADDR_OCM0_MAX)) {
+		return 1;
+	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
+	     QLA8XXX_ADDR_OCM1_MAX)) {
+		return 1;
+	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+	    qdr_max)) {
+		/* QDR network side */
+		window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
+		if (ha->qdr_sn_window == window)
+			return 1;
+	}
+
+	return 0;
+}
+
+static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
+		u64 off, void *data, int size)
+{
+	unsigned long flags;
+	void __iomem *addr;
+	int ret = 0;
+	u64 start;
+	void __iomem *mem_ptr = NULL;
+	unsigned long mem_base;
+	unsigned long mem_page;
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/*
+	 * If attempting to access an unknown address, or if the access
+	 * straddles hw windows, do not access.
+	 */
+	start = qla4_82xx_pci_set_window(ha, off);
+	if ((start == -1UL) ||
+	    (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+		printk(KERN_ERR"%s out of bound pci memory access. "
+				"offset is 0x%llx\n", DRIVER_NAME, off);
+		return -1;
+	}
+
+	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
+	if (!addr) {
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+		mem_base = pci_resource_start(ha->pdev, 0);
+		mem_page = start & PAGE_MASK;
+		/*
+		 * Map two pages whenever the user tries to access addresses
+		 * that span two consecutive pages.
+		 */
+		if (mem_page != ((start + size - 1) & PAGE_MASK))
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+		else
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+
+		if (mem_ptr == NULL) {
+			*(u8 *)data = 0;
+			return -1;
+		}
+		addr = mem_ptr;
+		addr += start & (PAGE_SIZE - 1);
+		write_lock_irqsave(&ha->hw_lock, flags);
+	}
+
+	switch (size) {
+	case 1:
+		*(u8  *)data = readb(addr);
+		break;
+	case 2:
+		*(u16 *)data = readw(addr);
+		break;
+	case 4:
+		*(u32 *)data = readl(addr);
+		break;
+	case 8:
+		*(u64 *)data = readq(addr);
+		break;
+	default:
+		ret = -1;
+		break;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return ret;
+}
+
+static int
+qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
+		void *data, int size)
+{
+	unsigned long flags;
+	void __iomem *addr;
+	int ret = 0;
+	u64 start;
+	void __iomem *mem_ptr = NULL;
+	unsigned long mem_base;
+	unsigned long mem_page;
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/*
+	 * If attempting to access an unknown address, or if the access
+	 * straddles hw windows, do not access.
+	 */
+	start = qla4_82xx_pci_set_window(ha, off);
+	if ((start == -1UL) ||
+	    (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+		printk(KERN_ERR"%s out of bound pci memory access. "
+				"offset is 0x%llx\n", DRIVER_NAME, off);
+		return -1;
+	}
+
+	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
+	if (!addr) {
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+		mem_base = pci_resource_start(ha->pdev, 0);
+		mem_page = start & PAGE_MASK;
+		/*
+		 * Map two pages whenever the user tries to access addresses
+		 * that span two consecutive pages.
+		 */
+		if (mem_page != ((start + size - 1) & PAGE_MASK))
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+		else
+			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+		if (mem_ptr == NULL)
+			return -1;
+
+		addr = mem_ptr;
+		addr += start & (PAGE_SIZE - 1);
+		write_lock_irqsave(&ha->hw_lock, flags);
+	}
+
+	switch (size) {
+	case 1:
+		writeb(*(u8 *)data, addr);
+		break;
+	case 2:
+		writew(*(u16 *)data, addr);
+		break;
+	case 4:
+		writel(*(u32 *)data, addr);
+		break;
+	case 8:
+		writeq(*(u64 *)data, addr);
+		break;
+	default:
+		ret = -1;
+		break;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return ret;
+}
+
+#define MTU_FUDGE_FACTOR 100
+
+static unsigned long
+qla4_82xx_decode_crb_addr(unsigned long addr)
+{
+	int i;
+	unsigned long base_addr, offset, pci_base;
+
+	if (!qla4_8xxx_crb_table_initialized)
+		qla4_82xx_crb_addr_transform_setup();
+
+	pci_base = ADDR_ERROR;
+	base_addr = addr & 0xfff00000;
+	offset = addr & 0x000fffff;
+
+	for (i = 0; i < MAX_CRB_XFORM; i++) {
+		if (crb_addr_xform[i] == base_addr) {
+			pci_base = i << 20;
+			break;
+		}
+	}
+	if (pci_base == ADDR_ERROR)
+		return pci_base;
+	else
+		return pci_base + offset;
+}
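+
+/*
+ * Example: base_addr = addr & 0xfff00000 is looked up in crb_addr_xform;
+ * a hit at table index i maps the block to pci_base = i << 20.  A match
+ * at index 6, say, turns address 0xXXX00056 into 0x600000 + 0x56 =
+ * 0x600056 in PCI CRB space.
+ */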
+
+static long rom_max_timeout = 100;
+static long qla4_82xx_rom_lock_timeout = 100;
+
+static int
+qla4_82xx_rom_lock(struct scsi_qla_host *ha)
+{
+	int i;
+	int done = 0, timeout = 0;
+
+	while (!done) {
+		/* acquire semaphore2 from PCI HW block */
+
+		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+		if (done == 1)
+			break;
+		if (timeout >= qla4_82xx_rom_lock_timeout)
+			return -1;
+
+		timeout++;
+
+		/* Yield CPU */
+		if (!in_interrupt())
+			schedule();
+		else {
+			for (i = 0; i < 20; i++)
+				cpu_relax();    /* This is a nop instr on i386 */
+		}
+	}
+	qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+	return 0;
+}
+
+static void
+qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
+{
+	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+}
+
+static int
+qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
+{
+	long timeout = 0;
+	long done = 0;
+
+	while (done == 0) {
+		done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+		done &= 2;
+		timeout++;
+		if (timeout >= rom_max_timeout) {
+			printk("%s: Timeout reached  waiting for rom done",
+					DRIVER_NAME);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
+qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+{
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+	if (qla4_82xx_wait_rom_done(ha)) {
+		printk("%s: Error waiting for rom done\n", DRIVER_NAME);
+		return -1;
+	}
+	/* reset abyte_cnt and dummy_byte_cnt */
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+	udelay(10);
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+
+	*valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+	return 0;
+}
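+
+/*
+ * The sequence above programs a 3-byte flash address, issues instruction
+ * opcode 0xb (the standard serial-flash FAST_READ command), polls
+ * ROMUSB_GLB_STATUS bit 1 for completion, then clears the byte-count
+ * registers before latching the 32-bit result from RDATA.
+ */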
+
+static int
+qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
+{
+	int ret, loops = 0;
+
+	while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+		udelay(100);
+		loops++;
+	}
+	if (loops >= 50000) {
+		ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
+			   DRIVER_NAME);
+		return -1;
+	}
+	ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
+	qla4_82xx_rom_unlock(ha);
+	return ret;
+}
+
+/**
+ * qla4_82xx_pinit_from_rom - CRB initialization sequence
+ * @ha: pointer to adapter structure
+ * @verbose: currently unused
+ *
+ * This routine performs the CRB initialization sequence that brings
+ * the ISP into an operational state.
+ **/
+static int
+qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
+{
+	int addr, val;
+	int i;
+	struct crb_addr_pair *buf;
+	unsigned long off;
+	unsigned offset, n;
+
+	struct crb_addr_pair {
+		long addr;
+		long data;
+	};
+
+	/* Halt all the individual PEGs and other blocks of the ISP */
+	qla4_82xx_rom_lock(ha);
+
+	/* disable all I2Q */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+	/* disable all niu interrupts */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+	/* disable xge rx/tx */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+	/* disable xg1 rx/tx */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+	/* disable sideband mac */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+	/* disable ap0 mac */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+	/* disable ap1 mac */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
+
+	/* halt sre */
+	val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+
+	/* halt epg */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+
+	/* halt timers */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
+
+	/* halt pegs */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+	msleep(5);
+
+	/* big hammer */
+	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+		/* don't reset CAM block on reset */
+		qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+	else
+		qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+
+	qla4_82xx_rom_unlock(ha);
+
+	/* Read the signature value from the flash.
+	 * Offset 0: Contains the signature (0xcafecafe)
+	 * Offset 4: Offset and number of addr/value pairs
+	 * present in the CRB initialization sequence
+	 */
+	if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+	    qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
+		ql4_printk(KERN_WARNING, ha,
+			"[ERROR] Reading crb_init area: n: %08x\n", n);
+		return -1;
+	}
+
+	/* Offset in flash = lower 16 bits
+	 * Number of entries = upper 16 bits
+	 */
+	offset = n & 0xffffU;
+	n = (n >> 16) & 0xffffU;
+
+	/* number of addr/value pairs should not exceed 1024 entries */
+	if (n  >= 1024) {
+		ql4_printk(KERN_WARNING, ha,
+		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
+		    DRIVER_NAME, __func__, n);
+		return -1;
+	}
+
+	ql4_printk(KERN_INFO, ha,
+		"%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n);
+
+	buf = kmalloc_array(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+	if (buf == NULL) {
+		ql4_printk(KERN_WARNING, ha,
+		    "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME);
+		return -1;
+	}
+
+	for (i = 0; i < n; i++) {
+		if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+		    qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
+		    0) {
+			kfree(buf);
+			return -1;
+		}
+
+		buf[i].addr = addr;
+		buf[i].data = val;
+	}
+
+	for (i = 0; i < n; i++) {
+		/* Translate internal CRB initialization
+		 * address to PCI bus address
+		 */
+		off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+		    QLA82XX_PCI_CRBSPACE;
+		/* Not all CRB addr/value pairs are written;
+		 * some of them are skipped
+		 */
+
+		/* skip if LS bit is set */
+		if (off & 0x1) {
+			DEBUG2(ql4_printk(KERN_WARNING, ha,
+			    "Skip CRB init replay for offset = 0x%lx\n", off));
+			continue;
+		}
+
+		/* skipping cold reboot MAGIC */
+		if (off == QLA82XX_CAM_RAM(0x1fc))
+			continue;
+
+		/* do not reset PCI */
+		if (off == (ROMUSB_GLB + 0xbc))
+			continue;
+
+		/* skip core clock, so that firmware can increase the clock */
+		if (off == (ROMUSB_GLB + 0xc8))
+			continue;
+
+		/* skip the function enable register */
+		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+			continue;
+
+		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+			continue;
+
+		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+			continue;
+
+		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+			continue;
+
+		if (off == ADDR_ERROR) {
+			ql4_printk(KERN_WARNING, ha,
+			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
+			    DRIVER_NAME, buf[i].addr);
+			continue;
+		}
+
+		qla4_82xx_wr_32(ha, off, buf[i].data);
+
+		/* The ISP requires a much bigger delay to settle down,
+		 * else crb_window returns 0xffffffff
+		 */
+		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+			msleep(1000);
+
+		/* The ISP requires a millisecond delay between
+		 * successive CRB register updates
+		 */
+		msleep(1);
+	}
+
+	kfree(buf);
+
+	/* Resetting the data and instruction cache */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+	/* Clear all protocol processing engines */
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+
+	return 0;
+}
+
+/**
+ * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: 128-bit-aligned MS memory address to write to
+ * @data: Data to be written
+ * @count: word_count to be written
+ *
+ * Return: On success return QLA_SUCCESS
+ *         On error return QLA_ERROR
+ **/
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+				uint32_t *data, uint32_t count)
+{
+	int i, j;
+	uint32_t agt_ctrl;
+	unsigned long flags;
+	int ret_val = QLA_SUCCESS;
+
+	/* Only 128-bit aligned access */
+	if (addr & 0xF) {
+		ret_val = QLA_ERROR;
+		goto exit_ms_mem_write;
+	}
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/* Write address */
+	ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+	if (ret_val == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+			   __func__);
+		goto exit_ms_mem_write_unlock;
+	}
+
+	for (i = 0; i < count; i++, addr += 16) {
+		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+					     QLA8XXX_ADDR_QDR_NET_MAX)) ||
+		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+					     QLA8XXX_ADDR_DDR_NET_MAX)))) {
+			ret_val = QLA_ERROR;
+			goto exit_ms_mem_write_unlock;
+		}
+
+		ret_val = ha->isp_ops->wr_reg_indirect(ha,
+						       MD_MIU_TEST_AGT_ADDR_LO,
+						       addr);
+		/* Write data */
+		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+						MD_MIU_TEST_AGT_WRDATA_LO,
+						*data++);
+		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+						MD_MIU_TEST_AGT_WRDATA_HI,
+						*data++);
+		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+						MD_MIU_TEST_AGT_WRDATA_ULO,
+						*data++);
+		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+						MD_MIU_TEST_AGT_WRDATA_UHI,
+						*data++);
+		if (ret_val == QLA_ERROR) {
+			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+				   __func__);
+			goto exit_ms_mem_write_unlock;
+		}
+
+		/* Check write status */
+		ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+						       MIU_TA_CTL_WRITE_ENABLE);
+		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+							MD_MIU_TEST_AGT_CTRL,
+							MIU_TA_CTL_WRITE_START);
+		if (ret_val == QLA_ERROR) {
+			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+				   __func__);
+			goto exit_ms_mem_write_unlock;
+		}
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			ret_val = ha->isp_ops->rd_reg_indirect(ha,
+							MD_MIU_TEST_AGT_CTRL,
+							&agt_ctrl);
+			if (ret_val == QLA_ERROR) {
+				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+					   __func__);
+				goto exit_ms_mem_write_unlock;
+			}
+			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		/* Status check failed */
+		if (j >= MAX_CTL_CHECK) {
+			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+					   __func__);
+			ret_val = QLA_ERROR;
+			goto exit_ms_mem_write_unlock;
+		}
+	}
+
+exit_ms_mem_write_unlock:
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+	return ret_val;
+}
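+
+/*
+ * Usage sketch (illustrative only; names other than the function itself
+ * follow the conventions above): writing a single 128-bit word takes
+ * count = 1 and four consecutive 32-bit words of payload, e.g.
+ *
+ *	uint32_t buf[4] = { w0, w1, w2, w3 };
+ *
+ *	if (qla4_8xxx_ms_mem_write_128b(ha, addr, buf, 1) != QLA_SUCCESS)
+ *		ql4_printk(KERN_ERR, ha, "%s: MS write failed\n", __func__);
+ *
+ * 'addr' must be 16-byte aligned and lie inside the QDR or DDR ranges,
+ * otherwise the function bails out with QLA_ERROR.
+ */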
+
+static int
+qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
+{
+	int  i, rval = 0;
+	long size = 0;
+	long flashaddr, memaddr;
+	u64 data;
+	u32 high, low;
+
+	flashaddr = memaddr = ha->hw.flt_region_bootload;
+	size = (image_start - flashaddr) / 8;
+
+	DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
+	    ha->host_no, __func__, flashaddr, image_start));
+
+	for (i = 0; i < size; i++) {
+		if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+		    (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
+		    (int *)&high))) {
+			rval = -1;
+			goto exit_load_from_flash;
+		}
+		data = ((u64)high << 32) | low ;
+		rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+		if (rval)
+			goto exit_load_from_flash;
+
+		flashaddr += 8;
+		memaddr   += 8;
+
+		if (i % 0x1000 == 0)
+			msleep(1);
+
+	}
+
+	udelay(100);
+
+	read_lock(&ha->hw_lock);
+	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+	read_unlock(&ha->hw_lock);
+
+exit_load_from_flash:
+	return rval;
+}
+
+static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
+{
+	u32 rst;
+
+	qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+	if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
+		printk(KERN_WARNING "%s: Error during CRB Initialization\n",
+		    __func__);
+		return QLA_ERROR;
+	}
+
+	udelay(500);
+
+	/* at this point, QM is in reset. This could be a problem if there are
+	 * incoming d* transition queue messages. QM/PCIE could wedge.
+	 * To get around this, QM is brought out of reset.
+	 */
+
+	rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+	/* unreset qm */
+	rst &= ~(1 << 28);
+	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+	if (qla4_82xx_load_from_flash(ha, image_start)) {
+		printk("%s: Error trying to load fw from flash!\n", __func__);
+		return QLA_ERROR;
+	}
+
+	return QLA_SUCCESS;
+}
+
+int
+qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
+		u64 off, void *data, int size)
+{
+	int i, j = 0, k, start, end, loop, sz[2], off0[2];
+	int shift_amount;
+	uint32_t temp;
+	uint64_t off8, val, mem_crb, word[2] = {0, 0};
+
+	/*
+	 * If not MN, go check for MS or invalid.
+	 */
+
+	if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+		mem_crb = QLA82XX_CRB_QDR_NET;
+	else {
+		mem_crb = QLA82XX_CRB_DDR_NET;
+		if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+			return qla4_82xx_pci_mem_read_direct(ha,
+					off, data, size);
+	}
+
+	off8 = off & 0xfffffff0;
+	off0[0] = off & 0xf;
+	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+	shift_amount = 4;
+
+	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+	off0[1] = 0;
+	sz[1] = size - sz[0];
+
+	for (i = 0; i < loop; i++) {
+		temp = off8 + (i << shift_amount);
+		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+		temp = 0;
+		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+		temp = MIU_TA_CTL_ENABLE;
+		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+		temp = MIU_TA_CTL_START_ENABLE;
+		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+			if ((temp & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			printk_ratelimited(KERN_ERR
+					   "%s: failed to read through agent\n",
+					   __func__);
+			break;
+		}
+
+		start = off0[i] >> 2;
+		end   = (off0[i] + sz[i] - 1) >> 2;
+		for (k = start; k <= end; k++) {
+			temp = qla4_82xx_rd_32(ha,
+				mem_crb + MIU_TEST_AGT_RDDATA(k));
+			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+		}
+	}
+
+	if (j >= MAX_CTL_CHECK)
+		return -1;
+
+	if ((off0[0] & 7) == 0) {
+		val = word[0];
+	} else {
+		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+		((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+	}
+
+	switch (size) {
+	case 1:
+		*(uint8_t  *)data = val;
+		break;
+	case 2:
+		*(uint16_t *)data = val;
+		break;
+	case 4:
+		*(uint32_t *)data = val;
+		break;
+	case 8:
+		*(uint64_t *)data = val;
+		break;
+	}
+	return 0;
+}
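+
+/*
+ * Worked example: every test-agent transaction returns one 16-byte line
+ * at off8 = off & ~0xf through RDDATA(0..3).  An 8-byte read at
+ * off % 16 == 8 gives off0[0] = 8, sz[0] = 8 and loop = 1; k runs 2..3,
+ * so word[0] collects RDDATA(2) in its low half and RDDATA(3) in its
+ * high half, and since (off0[0] & 7) == 0 that value is returned as-is.
+ */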
+
+int
+qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
+		u64 off, void *data, int size)
+{
+	int i, j, ret = 0, loop, sz[2], off0;
+	int scale, shift_amount, startword;
+	uint32_t temp;
+	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+	/*
+	 * If not MN, go check for MS or invalid.
+	 */
+	if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+		mem_crb = QLA82XX_CRB_QDR_NET;
+	else {
+		mem_crb = QLA82XX_CRB_DDR_NET;
+		if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
+			return qla4_82xx_pci_mem_write_direct(ha,
+					off, data, size);
+	}
+
+	off0 = off & 0x7;
+	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+	sz[1] = size - sz[0];
+
+	off8 = off & 0xfffffff0;
+	loop = (((off & 0xf) + size - 1) >> 4) + 1;
+	shift_amount = 4;
+	scale = 2;
+	startword = (off & 0xf)/8;
+
+	for (i = 0; i < loop; i++) {
+		if (qla4_82xx_pci_mem_read_2M(ha, off8 +
+		    (i << shift_amount), &word[i * scale], 8))
+			return -1;
+	}
+
+	switch (size) {
+	case 1:
+		tmpw = *((uint8_t *)data);
+		break;
+	case 2:
+		tmpw = *((uint16_t *)data);
+		break;
+	case 4:
+		tmpw = *((uint32_t *)data);
+		break;
+	case 8:
+	default:
+		tmpw = *((uint64_t *)data);
+		break;
+	}
+
+	if (sz[0] == 8)
+		word[startword] = tmpw;
+	else {
+		word[startword] &=
+		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+		word[startword] |= tmpw << (off0 * 8);
+	}
+
+	if (sz[1] != 0) {
+		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+		word[startword+1] |= tmpw >> (sz[0] * 8);
+	}
+
+	for (i = 0; i < loop; i++) {
+		temp = off8 + (i << shift_amount);
+		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+		temp = 0;
+		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+		temp = word[i * scale] & 0xffffffff;
+		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+		temp = (word[i * scale] >> 32) & 0xffffffff;
+		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+		temp = word[i*scale + 1] & 0xffffffff;
+		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
+		    temp);
+		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
+		    temp);
+
+		temp = MIU_TA_CTL_WRITE_ENABLE;
+		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+		temp = MIU_TA_CTL_WRITE_START;
+		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+			if ((temp & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			if (printk_ratelimit())
+				ql4_printk(KERN_ERR, ha,
+					   "%s: failed to read through agent\n",
+					   __func__);
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
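+
+/*
+ * The function above is a read-modify-write: the containing 16-byte
+ * line(s) are first fetched via qla4_82xx_pci_mem_read_2M(), the new
+ * bytes are merged in, and the full line is written back through the
+ * WRDATA registers.  For example, a 2-byte write at off % 16 == 5 gives
+ * off0 = 5, sz[0] = 2 and startword = 0, so bits 40..55 of word[0] are
+ * cleared and replaced with the new data before the write-back.
+ */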
+
+static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
+{
+	u32 val = 0;
+	int retries = 60;
+
+	if (!pegtune_val) {
+		do {
+			val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
+			if ((val == PHAN_INITIALIZE_COMPLETE) ||
+			    (val == PHAN_INITIALIZE_ACK))
+				return 0;
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(500);
+
+		} while (--retries);
+
+		if (!retries) {
+			pegtune_val = qla4_82xx_rd_32(ha,
+				QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+			printk(KERN_WARNING "%s: init failed, pegtune_val = %x\n",
+				__func__, pegtune_val);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
+{
+	uint32_t state = 0;
+	int loops = 0;
+
+	/* Window 1 call */
+	read_lock(&ha->hw_lock);
+	state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
+	read_unlock(&ha->hw_lock);
+
+	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
+		udelay(100);
+		/* Window 1 call */
+		read_lock(&ha->hw_lock);
+		state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
+		read_unlock(&ha->hw_lock);
+
+		loops++;
+	}
+
+	if (loops >= 30000) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+		    "Receive Peg initialization not complete: 0x%x.\n", state));
+		return QLA_ERROR;
+	}
+
+	return QLA_SUCCESS;
+}
+
+void
+qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
+{
+	uint32_t drv_active;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+	/*
+	 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
+	 * shift 1 by func_num to set a bit for the function.
+	 * For ISP8022, drv_active has 4 bits per function
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		drv_active |= (1 << ha->func_num);
+	else
+		drv_active |= (1 << (ha->func_num * 4));
+
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+		   __func__, ha->host_no, drv_active);
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
+}
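+
+/*
+ * Example: for func_num = 2 the mask set above is 0x4 on ISP8324/ISP8042
+ * (bit 2) and 0x100 on ISP8022 (bit 0 of the function's 4-bit field).
+ */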
+
+void
+qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
+{
+	uint32_t drv_active;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+
+	/*
+	 * For ISP8324 and ISP8042, the drv_active register has 1 bit per
+	 * function; shift 1 by func_num to locate the function's bit.
+	 * For ISP8022, drv_active has 4 bits per function.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		drv_active &= ~(1 << (ha->func_num));
+	else
+		drv_active &= ~(1 << (ha->func_num * 4));
+
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+		   __func__, ha->host_no, drv_active);
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
+{
+	uint32_t drv_state, drv_active;
+	int rval;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324 and ISP8042, the drv_state register has 1 bit per
+	 * function; shift 1 by func_num to check the function's bit.
+	 * For ISP8022, drv_state has 4 bits per function.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		rval = drv_state & (1 << ha->func_num);
+	else
+		rval = drv_state & (1 << (ha->func_num * 4));
+
+	if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
+		rval = 1;
+
+	return rval;
+}
+
+void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
+{
+	uint32_t drv_state;
+
+	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324 and ISP8042, the drv_state register has 1 bit per
+	 * function; shift 1 by func_num to set a bit for the function.
+	 * For ISP8022, drv_state has 4 bits per function.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		drv_state |= (1 << ha->func_num);
+	else
+		drv_state |= (1 << (ha->func_num * 4));
+
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+		   __func__, ha->host_no, drv_state);
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
+}
+
+void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
+{
+	uint32_t drv_state;
+
+	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324 and ISP8042, the drv_state register has 1 bit per
+	 * function; shift 1 by func_num to clear the function's bit.
+	 * For ISP8022, drv_state has 4 bits per function.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		drv_state &= ~(1 << ha->func_num);
+	else
+		drv_state &= ~(1 << (ha->func_num * 4));
+
+	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+		   __func__, ha->host_no, drv_state);
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
+{
+	uint32_t qsnt_state;
+
+	qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
+
+	/*
+	 * For ISP8324 and ISP8042, the drv_state register has 1 bit per
+	 * function; shift 1 by func_num to set a bit for the function.
+	 * For ISP8022, drv_state has 4 bits per function.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha))
+		qsnt_state |= (1 << ha->func_num);
+	else
+		qsnt_state |= (2 << (ha->func_num * 4));
+
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
+}
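+
+/*
+ * Note the '2 <<' in the ISP8022 case: within a function's 4-bit field,
+ * bit 0 is the reset-ready flag (see qla4_8xxx_set_rst_ready() above)
+ * while bit 1 marks quiescence, e.g. func_num = 2 yields mask 0x200.
+ */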
+
+
+static int
+qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
+{
+	uint16_t lnk;
+
+	/* scrub dma mask expansion register */
+	qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
+
+	/* Overwrite stale initialization register values */
+	qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+	qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+	qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+	if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
+		printk("%s: Error trying to start fw!\n", __func__);
+		return QLA_ERROR;
+	}
+
+	/* Handshake with the card before we register the devices. */
+	if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
+		printk("%s: Error during card handshake!\n", __func__);
+		return QLA_ERROR;
+	}
+
+	/* Negotiated Link width */
+	pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
+	ha->link_width = (lnk >> 4) & 0x3f;
+
+	/* Synchronize with Receive peg */
+	return qla4_82xx_rcvpeg_ready(ha);
+}
+
+int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
+{
+	int rval = QLA_ERROR;
+
+	/*
+	 * FW Load priority:
+	 * 1) Operational firmware residing in flash.
+	 * 2) Fail
+	 */
+
+	ql4_printk(KERN_INFO, ha,
+	    "FW: Retrieving flash offsets from FLT/FDT ...\n");
+	rval = qla4_8xxx_get_flash_info(ha);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	ql4_printk(KERN_INFO, ha,
+	    "FW: Attempting to load firmware from flash...\n");
+	rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);
+
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha,
+			   "FW: Load firmware from flash FAILED...\n");
+		return rval;
+	}
+
+	return rval;
+}
+
+void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+	if (qla4_82xx_rom_lock(ha)) {
+		/* Someone else is holding the lock. */
+		dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
+	}
+
+	/*
+	 * Either we got the lock, or someone
+	 * else died while holding it.
+	 * In either case, unlock.
+	 */
+	qla4_82xx_rom_unlock(ha);
+}
+
+static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
+					     uint32_t addr1, uint32_t mask)
+{
+	unsigned long timeout;
+	uint32_t rval = QLA_SUCCESS;
+	uint32_t temp;
+
+	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+	do {
+		ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+		if ((temp & mask) != 0)
+			break;
+
+		if (time_after_eq(jiffies, timeout)) {
+			ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
+			return QLA_ERROR;
+		}
+	} while (1);
+
+	return rval;
+}
+
+static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+				uint32_t addr3, uint32_t mask, uint32_t addr,
+				uint32_t *data_ptr)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t temp;
+	uint32_t data;
+
+	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+	if (rval)
+		goto exit_ipmdio_rd_reg;
+
+	temp = (0x40000000 | addr);
+	ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
+
+	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+	if (rval)
+		goto exit_ipmdio_rd_reg;
+
+	ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
+	*data_ptr = data;
+
+exit_ipmdio_rd_reg:
+	return rval;
+}
+
+
+static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
+						    uint32_t addr1,
+						    uint32_t addr2,
+						    uint32_t addr3,
+						    uint32_t mask)
+{
+	unsigned long timeout;
+	uint32_t temp;
+	uint32_t rval = QLA_SUCCESS;
+
+	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+	do {
+		ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
+		if ((temp & 0x1) != 1)
+			break;
+		if (time_after_eq(jiffies, timeout)) {
+			ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
+			return QLA_ERROR;
+		}
+	} while (1);
+
+	return rval;
+}
+
+static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
+				  uint32_t addr1, uint32_t addr3,
+				  uint32_t mask, uint32_t addr,
+				  uint32_t value)
+{
+	int rval = QLA_SUCCESS;
+
+	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+	if (rval)
+		goto exit_ipmdio_wr_reg;
+
+	ha->isp_ops->wr_reg_indirect(ha, addr3, value);
+	ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
+
+	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+	if (rval)
+		goto exit_ipmdio_wr_reg;
+
+exit_ipmdio_wr_reg:
+	return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla8xxx_minidump_entry_crb *crb_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
+	r_addr = crb_hdr->addr;
+	r_stride = crb_hdr->crb_strd.addr_stride;
+	loop_cnt = crb_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+		*data_ptr++ = cpu_to_le32(r_addr);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += r_stride;
+	}
+	*d_ptr = data_ptr;
+}
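+
+/*
+ * Each iteration above emits two little-endian dwords into the dump
+ * buffer, the register address followed by its value, so an entry with
+ * op_count registers consumes 2 * op_count dwords.
+ */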
+
+static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+	uint64_t dma_base_addr = 0;
+	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+							ha->fw_dump_tmplt_hdr;
+	dma_eng_num =
+		tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+	dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+				(dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+	/* Read the pex-dma's command-status-and-control register. */
+	rval = ha->isp_ops->rd_reg_indirect(ha,
+			(dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+			&cmd_sts_and_cntrl);
+
+	if (rval)
+		return QLA_ERROR;
+
+	/* Check if requested pex-dma engine is available. */
+	if (cmd_sts_and_cntrl & BIT_31)
+		return QLA_SUCCESS;
+	else
+		return QLA_ERROR;
+}
+
+static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
+			   struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+	int rval = QLA_SUCCESS, wait = 0;
+	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+	uint64_t dma_base_addr = 0;
+	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+							ha->fw_dump_tmplt_hdr;
+	dma_eng_num =
+		tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+	dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+				(dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+	rval = ha->isp_ops->wr_reg_indirect(ha,
+				dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
+				m_hdr->desc_card_addr);
+	if (rval)
+		goto error_exit;
+
+	rval = ha->isp_ops->wr_reg_indirect(ha,
+			      dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
+	if (rval)
+		goto error_exit;
+
+	rval = ha->isp_ops->wr_reg_indirect(ha,
+			      dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
+			      m_hdr->start_dma_cmd);
+	if (rval)
+		goto error_exit;
+
+	/* Wait for dma operation to complete. */
+	for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
+		rval = ha->isp_ops->rd_reg_indirect(ha,
+			    (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+			    &cmd_sts_and_cntrl);
+		if (rval)
+			goto error_exit;
+
+		if ((cmd_sts_and_cntrl & BIT_1) == 0)
+			break;
+		else
+			udelay(10);
+	}
+
+	/* Wait a max of 100 ms, otherwise fall back to rdmem entry read */
+	if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
+		rval = QLA_ERROR;
+		goto error_exit;
+	}
+
+error_exit:
+	return rval;
+}
+
+static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	int rval = QLA_SUCCESS;
+	struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+	uint32_t size, read_size;
+	uint8_t *data_ptr = (uint8_t *)*d_ptr;
+	void *rdmem_buffer = NULL;
+	dma_addr_t rdmem_dma;
+	struct qla4_83xx_pex_dma_descriptor dma_desc;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+
+	rval = qla4_83xx_check_dma_engine_state(ha);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: DMA engine not available. Fallback to rdmem-read.\n",
+				  __func__));
+		return QLA_ERROR;
+	}
+
+	m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
+	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+					  QLA83XX_PEX_DMA_READ_SIZE,
+					  &rdmem_dma, GFP_KERNEL);
+	if (!rdmem_buffer) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Unable to allocate rdmem dma buffer\n",
+				  __func__));
+		return QLA_ERROR;
+	}
+
+	/* Prepare the pex-dma descriptor to be written to MS memory.
+	 * dma-desc-cmd layout:
+	 *		0-3: dma-desc-cmd 0-3
+	 *		4-7: PCI function number
+	 *		8-15: dma-desc-cmd 8-15
+	 */
+	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+	dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+	dma_desc.dma_bus_addr = rdmem_dma;
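+
+	/* For a hypothetical raw command of 0x10f0 on PCI function 5 the
+	 * merge above yields (0x10f0 & 0xff0f) | (5 << 4) = 0x1050.
+	 */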
+
+	size = 0;
+	read_size = 0;
+	/*
+	 * Perform rdmem operation using pex-dma.
+	 * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
+	 */
+	while (read_size < m_hdr->read_data_size) {
+		if (m_hdr->read_data_size - read_size >=
+		    QLA83XX_PEX_DMA_READ_SIZE)
+			size = QLA83XX_PEX_DMA_READ_SIZE;
+		else {
+			size = (m_hdr->read_data_size - read_size);
+
+			if (rdmem_buffer)
+				dma_free_coherent(&ha->pdev->dev,
+						  QLA83XX_PEX_DMA_READ_SIZE,
+						  rdmem_buffer, rdmem_dma);
+
+			rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
+							  &rdmem_dma,
+							  GFP_KERNEL);
+			if (!rdmem_buffer) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "%s: Unable to allocate rdmem dma buffer\n",
+						  __func__));
+				return QLA_ERROR;
+			}
+			dma_desc.dma_bus_addr = rdmem_dma;
+		}
+
+		dma_desc.src_addr = m_hdr->read_addr + read_size;
+		dma_desc.cmd.read_data_size = size;
+
+		/* Prepare: Write pex-dma descriptor to MS memory. */
+		rval = qla4_8xxx_ms_mem_write_128b(ha,
+			      (uint64_t)m_hdr->desc_card_addr,
+			      (uint32_t *)&dma_desc,
+			      (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
+		if (rval != QLA_SUCCESS) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s: Error writing rdmem-dma-init to MS !!!\n",
+				   __func__);
+			goto error_exit;
+		}
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
+				  __func__, size));
+		/* Execute: Start pex-dma operation. */
+		rval = qla4_83xx_start_pex_dma(ha, m_hdr);
+		if (rval != QLA_SUCCESS) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi(%ld): start-pex-dma failed rval=0x%x\n",
+					  ha->host_no, rval));
+			goto error_exit;
+		}
+
+		memcpy(data_ptr, rdmem_buffer, size);
+		data_ptr += size;
+		read_size += size;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+
+	*d_ptr = (uint32_t *)data_ptr;
+
+error_exit:
+	if (rdmem_buffer)
+		dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
+				  rdmem_dma);
+
+	return rval;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+				 struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				 uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	unsigned long p_wait, w_time, p_mask;
+	uint32_t c_value_w, c_value_r;
+	struct qla8xxx_minidump_entry_cache *cache_hdr;
+	int rval = QLA_ERROR;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
+
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+	p_wait = cache_hdr->cache_ctrl.poll_wait;
+	p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+	for (i = 0; i < loop_count; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
+
+		if (c_value_w)
+			ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
+
+		if (p_mask) {
+			w_time = jiffies + p_wait;
+			do {
+				ha->isp_ops->rd_reg_indirect(ha, c_addr,
+							     &c_value_r);
+				if ((c_value_r & p_mask) == 0) {
+					break;
+				} else if (time_after_eq(jiffies, w_time)) {
+					/* capturing dump failed */
+					return rval;
+				}
+			} while (1);
+		}
+
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
+			*data_ptr++ = cpu_to_le32(r_value);
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr)
+{
+	struct qla8xxx_minidump_entry_crb *crb_entry;
+	uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+	uint32_t crb_addr;
+	unsigned long wtime;
+	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+	int i;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+						ha->fw_dump_tmplt_hdr;
+	crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
+
+	crb_addr = crb_entry->addr;
+	for (i = 0; i < crb_entry->op_count; i++) {
+		opcode = crb_entry->crb_ctrl.opcode;
+		if (opcode & QLA8XXX_DBG_OPCODE_WR) {
+			ha->isp_ops->wr_reg_indirect(ha, crb_addr,
+						     crb_entry->value_1);
+			opcode &= ~QLA8XXX_DBG_OPCODE_WR;
+		}
+		if (opcode & QLA8XXX_DBG_OPCODE_RW) {
+			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+			ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+			opcode &= ~QLA8XXX_DBG_OPCODE_RW;
+		}
+		if (opcode & QLA8XXX_DBG_OPCODE_AND) {
+			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+			read_value &= crb_entry->value_2;
+			opcode &= ~QLA8XXX_DBG_OPCODE_AND;
+			if (opcode & QLA8XXX_DBG_OPCODE_OR) {
+				read_value |= crb_entry->value_3;
+				opcode &= ~QLA8XXX_DBG_OPCODE_OR;
+			}
+			ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+		}
+		if (opcode & QLA8XXX_DBG_OPCODE_OR) {
+			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+			read_value |= crb_entry->value_3;
+			ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
+			opcode &= ~QLA8XXX_DBG_OPCODE_OR;
+		}
+		if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
+			poll_time = crb_entry->crb_strd.poll_timeout;
+			wtime = jiffies + poll_time;
+			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
+
+			do {
+				if ((read_value & crb_entry->value_2) ==
+				    crb_entry->value_1) {
+					break;
+				} else if (time_after_eq(jiffies, wtime)) {
+					/* capturing dump failed */
+					rval = QLA_ERROR;
+					break;
+				} else {
+					ha->isp_ops->rd_reg_indirect(ha,
+							crb_addr, &read_value);
+				}
+			} while (1);
+			opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
+		}
+
+		if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
+			index = crb_entry->crb_ctrl.state_index_v;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
+		}
+
+		if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			if (crb_entry->crb_ctrl.state_index_v) {
+				index = crb_entry->crb_ctrl.state_index_v;
+				read_value =
+					tmplt_hdr->saved_state_array[index];
+			} else {
+				read_value = crb_entry->value_1;
+			}
+
+			ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
+			opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
+		}
+
+		if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
+			index = crb_entry->crb_ctrl.state_index_v;
+			read_value = tmplt_hdr->saved_state_array[index];
+			read_value <<= crb_entry->crb_ctrl.shl;
+			read_value >>= crb_entry->crb_ctrl.shr;
+			if (crb_entry->value_2)
+				read_value &= crb_entry->value_2;
+			read_value |= crb_entry->value_3;
+			read_value += crb_entry->value_1;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
+		}
+		crb_addr += crb_entry->crb_strd.addr_stride;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+	return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
+	r_addr = ocm_hdr->read_addr;
+	r_stride = ocm_hdr->read_addr_stride;
+	loop_cnt = ocm_hdr->op_count;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+			  __func__, r_addr, r_stride, loop_cnt));
+
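+	/* OCM lives in the PCI BAR mapping, so read it directly rather than
+	 * through the indirect register window. */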
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += r_stride;
+	}
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+		__func__, (unsigned long)(loop_cnt * sizeof(uint32_t))));
+	*d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+	struct qla8xxx_minidump_entry_mux *mux_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
+	r_addr = mux_hdr->read_addr;
+	s_addr = mux_hdr->select_addr;
+	s_stride = mux_hdr->select_value_stride;
+	s_value = mux_hdr->select_value;
+	loop_cnt = mux_hdr->op_count;
+
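+	/* Capture (select, value) pairs: program the mux select, then read
+	 * the muxed register and store both words. */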
+	for (i = 0; i < loop_cnt; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+		ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+		*data_ptr++ = cpu_to_le32(s_value);
+		*data_ptr++ = cpu_to_le32(r_value);
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	uint32_t c_value_w;
+	struct qla8xxx_minidump_entry_cache *cache_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
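+	/* Same sequence as the L2 tag capture, but the L1 controller needs
+	 * no completion polling after the control write. */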
+	for (i = 0; i < loop_count; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
+		ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
+			*data_ptr++ = cpu_to_le32(r_value);
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t s_addr, r_addr;
+	uint32_t r_stride, r_value, r_cnt, qid = 0;
+	uint32_t i, k, loop_cnt;
+	struct qla8xxx_minidump_entry_queue *q_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
+	s_addr = q_hdr->select_addr;
+	r_cnt = q_hdr->rd_strd.read_addr_cnt;
+	r_stride = q_hdr->rd_strd.read_addr_stride;
+	loop_cnt = q_hdr->op_count;
+
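+	/* Walk the queues: select each queue id in turn and dump r_cnt
+	 * registers per queue. */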
+	for (i = 0; i < loop_cnt; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
+		r_addr = q_hdr->read_addr;
+		for (k = 0; k < r_cnt; k++) {
+			ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+			*data_ptr++ = cpu_to_le32(r_value);
+			r_addr += r_stride;
+		}
+		qid += q_hdr->q_strd.queue_id_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW		0x42110030
+#define MD_DIRECT_ROM_READ_BASE		0x42150000
+
+static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_value;
+	uint32_t i, loop_cnt;
+	struct qla8xxx_minidump_entry_rdrom *rom_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
+	r_addr = rom_hdr->read_addr;
+	loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: flash_addr: 0x%x, dword count: 0x%x\n",
+			   __func__, r_addr, loop_cnt));
+
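+	/* The ROM is accessed through a 64KB window: the upper 16 address
+	 * bits select the window, the lower 16 form the read offset. */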
+	for (i = 0; i < loop_cnt; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
+					     (r_addr & 0xFFFF0000));
+		ha->isp_ops->rd_reg_indirect(ha,
+				MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
+				&r_value);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += sizeof(uint32_t);
+	}
+	*d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL		0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO		0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI		0x41000098
+
+static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_value, r_data;
+	uint32_t i, j, loop_cnt;
+	struct qla8xxx_minidump_entry_rdmem *m_hdr;
+	unsigned long flags;
+	uint32_t *data_ptr = *d_ptr;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+	m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
+	r_addr = m_hdr->read_addr;
+	loop_cnt = m_hdr->read_data_size/16;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+			  __func__, r_addr, m_hdr->read_data_size));
+
+	if (r_addr & 0xf) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+				  __func__, r_addr));
+		return QLA_ERROR;
+	}
+
+	if (m_hdr->read_data_size % 16) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+				  __func__, m_hdr->read_data_size));
+		return QLA_ERROR;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+			  __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
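+	/* Read memory through the MIU test agent: program the 64-bit
+	 * address, pulse enable then start, poll the busy bit, and collect
+	 * four data words (16 bytes) per iteration. */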
+	write_lock_irqsave(&ha->hw_lock, flags);
+	for (i = 0; i < loop_cnt; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
+					     r_addr);
+		r_value = 0;
+		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
+					     r_value);
+		r_value = MIU_TA_CTL_ENABLE;
+		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
+		r_value = MIU_TA_CTL_START_ENABLE;
+		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+						     &r_value);
+			if ((r_value & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			printk_ratelimited(KERN_ERR
+					   "%s: failed to read through agent\n",
+					    __func__);
+			write_unlock_irqrestore(&ha->hw_lock, flags);
+			return QLA_SUCCESS;
+		}
+
+		for (j = 0; j < 4; j++) {
+			ha->isp_ops->rd_reg_indirect(ha,
+						     MD_MIU_TEST_AGT_RDDATA[j],
+						     &r_data);
+			*data_ptr++ = cpu_to_le32(r_data);
+		}
+
+		r_addr += 16;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+			  __func__, (loop_cnt * 16)));
+
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t *data_ptr = *d_ptr;
+	int rval = QLA_SUCCESS;
+
+	rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
+	if (rval != QLA_SUCCESS)
+		rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+							  &data_ptr);
+	*d_ptr = data_ptr;
+	return rval;
+}
+
+static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				int index)
+{
+	entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+			  ha->host_no, index, entry_hdr->entry_type,
+			  entry_hdr->d_ctrl.entry_capture_mask));
+	/* If the driver encounters a new entry type that it cannot process,
+	 * it should just skip the entry and adjust the total buffer size by
+	 * subtracting the skipped bytes from it.
+	 */
+	ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
+}
+
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+	uint16_t s_stride, i;
+	uint32_t *data_ptr = *d_ptr;
+	uint32_t rval = QLA_SUCCESS;
+	struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
+
+	pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
+	s_addr = le32_to_cpu(pollrd_hdr->select_addr);
+	r_addr = le32_to_cpu(pollrd_hdr->read_addr);
+	s_value = le32_to_cpu(pollrd_hdr->select_value);
+	s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
+
+	poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+	poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
+
+	for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
+		ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
+		poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
+		while (1) {
+			ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
+
+			if ((r_value & poll_mask) != 0) {
+				break;
+			} else {
+				msleep(1);
+				if (--poll_wait == 0) {
+					ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+						   __func__);
+					rval = QLA_ERROR;
+					goto exit_process_pollrd;
+				}
+			}
+		}
+		ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
+		*data_ptr++ = cpu_to_le32(s_value);
+		*data_ptr++ = cpu_to_le32(r_value);
+		s_value += s_stride;
+	}
+
+	*d_ptr = data_ptr;
+
+exit_process_pollrd:
+	return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	int loop_cnt;
+	uint32_t addr1, addr2, value, data, temp, wrval;
+	uint8_t stride, stride2;
+	uint16_t count;
+	uint32_t poll, mask, data_size, modify_mask;
+	uint32_t wait_count = 0;
+	uint32_t *data_ptr = *d_ptr;
+	struct qla8044_minidump_entry_rddfe *rddfe;
+	uint32_t rval = QLA_SUCCESS;
+
+	rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
+	addr1 = le32_to_cpu(rddfe->addr_1);
+	value = le32_to_cpu(rddfe->value);
+	stride = le32_to_cpu(rddfe->stride);
+	stride2 = le32_to_cpu(rddfe->stride2);
+	count = le32_to_cpu(rddfe->count);
+
+	poll = le32_to_cpu(rddfe->poll);
+	mask = le32_to_cpu(rddfe->mask);
+	modify_mask = le32_to_cpu(rddfe->modify_mask);
+	data_size = le32_to_cpu(rddfe->data_size);
+
+	addr2 = addr1 + stride;
+
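+	/* DFE capture handshake: write the value with bit 30 set to arm,
+	 * poll the ready mask, merge the loop count into the data register,
+	 * re-arm and poll two more times, then store the written pattern
+	 * and the captured word. */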
+	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+		ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
+
+		wait_count = 0;
+		while (wait_count < poll) {
+			ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+			if ((temp & mask) != 0)
+				break;
+			wait_count++;
+		}
+
+		if (wait_count == poll) {
+			ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+			rval = QLA_ERROR;
+			goto exit_process_rddfe;
+		} else {
+			ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
+			temp = temp & modify_mask;
+			temp = (temp | ((loop_cnt << 16) | loop_cnt));
+			wrval = ((temp << 16) | temp);
+
+			ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
+			ha->isp_ops->wr_reg_indirect(ha, addr1, value);
+
+			wait_count = 0;
+			while (wait_count < poll) {
+				ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+				if ((temp & mask) != 0)
+					break;
+				wait_count++;
+			}
+			if (wait_count == poll) {
+				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+					   __func__);
+				rval = QLA_ERROR;
+				goto exit_process_rddfe;
+			}
+
+			ha->isp_ops->wr_reg_indirect(ha, addr1,
+						     ((0x40000000 | value) +
+						     stride2));
+			wait_count = 0;
+			while (wait_count < poll) {
+				ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+				if ((temp & mask) != 0)
+					break;
+				wait_count++;
+			}
+
+			if (wait_count == poll) {
+				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+					   __func__);
+				rval = QLA_ERROR;
+				goto exit_process_rddfe;
+			}
+
+			ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
+
+			*data_ptr++ = cpu_to_le32(wrval);
+			*data_ptr++ = cpu_to_le32(data);
+		}
+	}
+
+	*d_ptr = data_ptr;
+exit_process_rddfe:
+	return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t addr1, addr2, value1, value2, data, selval;
+	uint8_t stride1, stride2;
+	uint32_t addr3, addr4, addr5, addr6, addr7;
+	uint16_t count, loop_cnt;
+	uint32_t poll, mask;
+	uint32_t *data_ptr = *d_ptr;
+	struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+	rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
+	addr1 = le32_to_cpu(rdmdio->addr_1);
+	addr2 = le32_to_cpu(rdmdio->addr_2);
+	value1 = le32_to_cpu(rdmdio->value_1);
+	stride1 = le32_to_cpu(rdmdio->stride_1);
+	stride2 = le32_to_cpu(rdmdio->stride_2);
+	count = le32_to_cpu(rdmdio->count);
+
+	poll = le32_to_cpu(rdmdio->poll);
+	mask = le32_to_cpu(rdmdio->mask);
+	value2 = le32_to_cpu(rdmdio->value_2);
+
+	addr3 = addr1 + stride1;
+
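+	/* Each MDIO read is a multi-step indirect sequence: wait for the
+	 * bus to go idle, program the access registers through the indirect
+	 * write helper, issue the read op (0x2), wait for idle again, then
+	 * fetch the data word. */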
+	for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+		rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+							 addr3, mask);
+		if (rval)
+			goto exit_process_rdmdio;
+
+		addr4 = addr2 - stride1;
+		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
+					     value2);
+		if (rval)
+			goto exit_process_rdmdio;
+
+		addr5 = addr2 - (2 * stride1);
+		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
+					     value1);
+		if (rval)
+			goto exit_process_rdmdio;
+
+		addr6 = addr2 - (3 * stride1);
+		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
+					     addr6, 0x2);
+		if (rval)
+			goto exit_process_rdmdio;
+
+		rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+							 addr3, mask);
+		if (rval)
+			goto exit_process_rdmdio;
+
+		addr7 = addr2 - (4 * stride1);
+		rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
+						      mask, addr7, &data);
+		if (rval)
+			goto exit_process_rdmdio;
+
+		selval = (value2 << 18) | (value1 << 2) | 2;
+
+		stride2 = le32_to_cpu(rdmdio->stride_2);
+		*data_ptr++ = cpu_to_le32(selval);
+		*data_ptr++ = cpu_to_le32(data);
+
+		value1 = value1 + stride2;
+		*d_ptr = data_ptr;
+	}
+
+exit_process_rdmdio:
+	return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+	uint32_t wait_count = 0;
+	uint32_t rval = QLA_SUCCESS;
+
+	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+	addr1 = le32_to_cpu(pollwr_hdr->addr_1);
+	addr2 = le32_to_cpu(pollwr_hdr->addr_2);
+	value1 = le32_to_cpu(pollwr_hdr->value_1);
+	value2 = le32_to_cpu(pollwr_hdr->value_2);
+
+	poll = le32_to_cpu(pollwr_hdr->poll);
+	mask = le32_to_cpu(pollwr_hdr->mask);
+
+	while (wait_count < poll) {
+		ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+		if ((r_value & poll) != 0)
+			break;
+
+		wait_count++;
+	}
+
+	if (wait_count == poll) {
+		ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+		rval = QLA_ERROR;
+		goto exit_process_pollwr;
+	}
+
+	ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
+	ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
+
+	wait_count = 0;
+	while (wait_count < poll) {
+		ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+		if ((r_value & poll) != 0)
+			break;
+		wait_count++;
+	}
+
+exit_process_pollwr:
+	return rval;
+}
+
+static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+	uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+	struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
+	sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
+	sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
+	sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
+	sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
+	sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
+	read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
+
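+	/* Two-level mux: program the first select, mask it to form the
+	 * second select, read the muxed data, then repeat the sequence with
+	 * the second select value. */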
+	for (i = 0; i < rdmux2_hdr->op_count; i++) {
+		ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
+		t_sel_val = sel_val1 & sel_val_mask;
+		*data_ptr++ = cpu_to_le32(t_sel_val);
+
+		ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+		ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+		*data_ptr++ = cpu_to_le32(data);
+
+		ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
+		t_sel_val = sel_val2 & sel_val_mask;
+		*data_ptr++ = cpu_to_le32(t_sel_val);
+
+		ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
+		ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
+
+		*data_ptr++ = cpu_to_le32(data);
+
+		sel_val1 += rdmux2_hdr->select_value_stride;
+		sel_val2 += rdmux2_hdr->select_value_stride;
+	}
+
+	*d_ptr = data_ptr;
+}
+
+static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t poll_wait, poll_mask, r_value, data;
+	uint32_t addr_1, addr_2, value_1, value_2;
+	uint32_t *data_ptr = *d_ptr;
+	uint32_t rval = QLA_SUCCESS;
+	struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
+
+	poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
+	addr_1 = le32_to_cpu(poll_hdr->addr_1);
+	addr_2 = le32_to_cpu(poll_hdr->addr_2);
+	value_1 = le32_to_cpu(poll_hdr->value_1);
+	value_2 = le32_to_cpu(poll_hdr->value_2);
+	poll_mask = le32_to_cpu(poll_hdr->poll_mask);
+
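+	/* Write value_1 and poll for completion, then read-modify-write the
+	 * second register, write value_2, poll again, and store the
+	 * (address, data) pair. */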
+	ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
+
+	poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+	while (1) {
+		ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+		if ((r_value & poll_mask) != 0) {
+			break;
+		} else {
+			msleep(1);
+			if (--poll_wait == 0) {
+				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
+					   __func__);
+				rval = QLA_ERROR;
+				goto exit_process_pollrdmwr;
+			}
+		}
+	}
+
+	ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
+	data &= le32_to_cpu(poll_hdr->modify_mask);
+	ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
+	ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
+
+	poll_wait = le32_to_cpu(poll_hdr->poll_wait);
+	while (1) {
+		ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
+
+		if ((r_value & poll_mask) != 0) {
+			break;
+		} else {
+			msleep(1);
+			if (--poll_wait == 0) {
+				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
+					   __func__);
+				rval = QLA_ERROR;
+				goto exit_process_pollrdmwr;
+			}
+		}
+	}
+
+	*data_ptr++ = cpu_to_le32(addr_2);
+	*data_ptr++ = cpu_to_le32(data);
+	*d_ptr = data_ptr;
+
+exit_process_pollrdmwr:
+	return rval;
+}
+
+static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
+				struct qla8xxx_minidump_entry_hdr *entry_hdr,
+				uint32_t **d_ptr)
+{
+	uint32_t fl_addr, u32_count, rval;
+	struct qla8xxx_minidump_entry_rdrom *rom_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
+	fl_addr = le32_to_cpu(rom_hdr->read_addr);
+	u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+			  __func__, fl_addr, u32_count));
+
+	rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
+						 (u8 *)(data_ptr), u32_count);
+
+	if (rval == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Flash Read Error, Count=%d\n",
+			   __func__, u32_count);
+		goto exit_process_rdrom;
+	}
+
+	data_ptr += u32_count;
+	*d_ptr = data_ptr;
+
+exit_process_rdrom:
+	return rval;
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+	int num_entry_hdr = 0;
+	struct qla8xxx_minidump_entry_hdr *entry_hdr;
+	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+	uint32_t *data_ptr;
+	uint32_t data_collected = 0;
+	int i, rval = QLA_ERROR;
+	uint64_t now;
+	uint32_t timestamp;
+
+	ha->fw_dump_skip_size = 0;
+	if (!ha->fw_dump) {
+		ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+			   __func__, ha->host_no);
+		return rval;
+	}
+
+	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+						ha->fw_dump_tmplt_hdr;
+	data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+						ha->fw_dump_tmplt_size);
+	data_collected += ha->fw_dump_tmplt_size;
+
+	num_entry_hdr = tmplt_hdr->num_of_entries;
+	ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+		   __func__, data_ptr);
+	ql4_printk(KERN_INFO, ha,
+		   "[%s]: no of entry headers in Template: 0x%x\n",
+		   __func__, num_entry_hdr);
+	ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+		   __func__, ha->fw_dump_capture_mask);
+	ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+		   __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+	/* Update current timestamp before taking dump */
+	now = get_jiffies_64();
+	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+	tmplt_hdr->driver_timestamp = timestamp;
+
+	entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
+					(((uint8_t *)ha->fw_dump_tmplt_hdr) +
+					 tmplt_hdr->first_entry_offset);
+
+	if (is_qla8032(ha) || is_qla8042(ha))
+		tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
+					tmplt_hdr->ocm_window_reg[ha->func_num];
+
+	/* Walk through the entry headers - validate/perform required action */
+	for (i = 0; i < num_entry_hdr; i++) {
+		if (data_collected > ha->fw_dump_size) {
+			ql4_printk(KERN_INFO, ha,
+				   "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+				   data_collected, ha->fw_dump_size);
+			return rval;
+		}
+
+		if (!(entry_hdr->d_ctrl.entry_capture_mask &
+		      ha->fw_dump_capture_mask)) {
+			entry_hdr->d_ctrl.driver_flags |=
+						QLA8XXX_DBG_SKIPPED_FLAG;
+			goto skip_nxt_entry;
+		}
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Data collected: [0x%x], Dump size left:[0x%x]\n",
+				  data_collected,
+				  (ha->fw_dump_size - data_collected)));
+
+		/* Decode the entry type and take required action to capture
+		 * debug data
+		 */
+		switch (entry_hdr->entry_type) {
+		case QLA8XXX_RDEND:
+			qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA8XXX_CNTRL:
+			rval = qla4_8xxx_minidump_process_control(ha,
+								  entry_hdr);
+			if (rval != QLA_SUCCESS) {
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA8XXX_RDCRB:
+			qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA8XXX_RDMEM:
+			rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+								&data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA8XXX_BOARD:
+		case QLA8XXX_RDROM:
+			if (is_qla8022(ha)) {
+				qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
+								 &data_ptr);
+			} else if (is_qla8032(ha) || is_qla8042(ha)) {
+				rval = qla4_83xx_minidump_process_rdrom(ha,
+								    entry_hdr,
+								    &data_ptr);
+				if (rval != QLA_SUCCESS)
+					qla4_8xxx_mark_entry_skipped(ha,
+								     entry_hdr,
+								     i);
+			}
+			break;
+		case QLA8XXX_L2DTG:
+		case QLA8XXX_L2ITG:
+		case QLA8XXX_L2DAT:
+		case QLA8XXX_L2INS:
+			rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+								&data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA8XXX_L1DTG:
+		case QLA8XXX_L1ITG:
+		case QLA8XXX_L1DAT:
+		case QLA8XXX_L1INS:
+			qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+							   &data_ptr);
+			break;
+		case QLA8XXX_RDOCM:
+			qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA8XXX_RDMUX:
+			qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA8XXX_QUEUE:
+			qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+							 &data_ptr);
+			break;
+		case QLA83XX_POLLRD:
+			if (is_qla8022(ha)) {
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				break;
+			}
+			rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
+							       &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA83XX_RDMUX2:
+			if (is_qla8022(ha)) {
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				break;
+			}
+			qla83xx_minidump_process_rdmux2(ha, entry_hdr,
+							&data_ptr);
+			break;
+		case QLA83XX_POLLRDMWR:
+			if (is_qla8022(ha)) {
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+				break;
+			}
+			rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
+								  &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA8044_RDDFE:
+			rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
+								&data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA8044_RDMDIO:
+			rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
+								 &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA8044_POLLWR:
+			rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
+								 &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		case QLA8XXX_RDNOP:
+		default:
+			qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+			break;
+		}
+
+		data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
+skip_nxt_entry:
+		/*  next entry in the template */
+		entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
+				(((uint8_t *)entry_hdr) +
+				 entry_hdr->entry_size);
+	}
+
+	if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
+		ql4_printk(KERN_INFO, ha,
+			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+			   data_collected, ha->fw_dump_size);
+		rval = QLA_ERROR;
+		goto md_failed;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+			  __func__, i));
+md_failed:
+	return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ * @code: uevent code to emit
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+	char event_string[40];
+	char *envp[] = { event_string, NULL };
+
+	switch (code) {
+	case QL4_UEVENT_CODE_FW_DUMP:
+		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+			 ha->host_no);
+		break;
+	default:
+		/*do nothing*/
+		break;
+	}
+
+	kobject_uevent_env(&ha->pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
+void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
+{
+	if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+	    !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+		if (!qla4_8xxx_collect_md_data(ha)) {
+			qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+			set_bit(AF_82XX_FW_DUMPED, &ha->flags);
+		} else {
+			ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
+				   __func__);
+		}
+	}
+}
+
+/**
+ * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
+{
+	int rval = QLA_ERROR;
+	int i;
+	uint32_t old_count, count;
+	int need_reset = 0;
+
+	need_reset = ha->isp_ops->need_reset(ha);
+
+	if (need_reset) {
+		/* We are trying to perform a recovery here. */
+		if (test_bit(AF_FW_RECOVERY, &ha->flags))
+			ha->isp_ops->rom_lock_recovery(ha);
+	} else  {
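+		/* No reset requested: sample the firmware heartbeat counter
+		 * for up to 2 seconds; if it advances, firmware is already
+		 * running and the device can go straight to READY. */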
+		old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+		for (i = 0; i < 10; i++) {
+			msleep(200);
+			count = qla4_8xxx_rd_direct(ha,
+						    QLA8XXX_PEG_ALIVE_COUNTER);
+			if (count != old_count) {
+				rval = QLA_SUCCESS;
+				goto dev_ready;
+			}
+		}
+		ha->isp_ops->rom_lock_recovery(ha);
+	}
+
+	/* set to DEV_INITIALIZING */
+	ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+			    QLA8XXX_DEV_INITIALIZING);
+
+	ha->isp_ops->idc_unlock(ha);
+
+	if (is_qla8022(ha))
+		qla4_8xxx_get_minidump(ha);
+
+	rval = ha->isp_ops->restart_firmware(ha);
+	ha->isp_ops->idc_lock(ha);
+
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
+		qla4_8xxx_clear_drv_active(ha);
+		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+				    QLA8XXX_DEV_FAILED);
+		return rval;
+	}
+
+dev_ready:
+	ql4_printk(KERN_INFO, ha, "HW State: READY\n");
+	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
+
+	return rval;
+}
+
+/**
+ * qla4_82xx_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
+{
+	uint32_t dev_state, drv_state, drv_active;
+	uint32_t active_mask = 0xFFFFFFFF;
+	unsigned long reset_timeout;
+
+	ql4_printk(KERN_INFO, ha,
+		"Performing ISP error recovery\n");
+
+	if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
+		qla4_82xx_idc_unlock(ha);
+		ha->isp_ops->disable_intrs(ha);
+		qla4_82xx_idc_lock(ha);
+	}
+
+	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "%s(%ld): reset acknowledged\n",
+				  __func__, ha->host_no));
+		qla4_8xxx_set_rst_ready(ha);
+	} else {
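+		/* The reset owner never acks itself; drop its own
+		 * drv_active bit from the ack mask. */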
+		active_mask = (~(1 << (ha->func_num * 4)));
+	}
+
+	/* wait for 10 seconds for reset ack from all functions */
+	reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
+
+	drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+	ql4_printk(KERN_INFO, ha,
+		"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+		__func__, ha->host_no, drv_state, drv_active);
+
+	while (drv_state != (drv_active & active_mask)) {
+		if (time_after_eq(jiffies, reset_timeout)) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+				   DRIVER_NAME, drv_state, drv_active);
+			break;
+		}
+
+		/*
+		 * When reset_owner times out, check which functions
+		 * acked/did not ack
+		 */
+		if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+				   __func__, ha->host_no, drv_state,
+				   drv_active);
+		}
+		qla4_82xx_idc_unlock(ha);
+		msleep(1000);
+		qla4_82xx_idc_lock(ha);
+
+		drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+		drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	}
+
+	/* Clear RESET OWNER as we are not going to use it any further */
+	clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
+
+	dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+		   dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+
+	/* Force to DEV_COLD unless someone else is starting a reset */
+	if (dev_state != QLA8XXX_DEV_INITIALIZING) {
+		ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+		qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
+		qla4_8xxx_set_rst_ready(ha);
+	}
+}
+
+/**
+ * qla4_8xxx_need_qsnt_handler - Code to start qsnt
+ * @ha: pointer to adapter structure
+ **/
+void
+qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
+{
+	ha->isp_ops->idc_lock(ha);
+	qla4_8xxx_set_qsnt_ready(ha);
+	ha->isp_ops->idc_unlock(ha);
+}
+
+static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+	if (drv_active == (1 << (ha->func_num * 4))) {
+		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
+				    QLA82XX_IDC_VERSION);
+		ql4_printk(KERN_INFO, ha,
+			   "%s: IDC version updated to %d\n", __func__,
+			   QLA82XX_IDC_VERSION);
+	} else {
+		idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+		if (QLA82XX_IDC_VERSION != idc_ver) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+				   __func__, QLA82XX_IDC_VERSION, idc_ver);
+		}
+	}
+}
+
+static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+
+	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+	if (drv_active == (1 << ha->func_num)) {
+		idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+		idc_ver &= (~0xFF);
+		idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
+		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
+		ql4_printk(KERN_INFO, ha,
+			   "%s: IDC version updated to %d\n", __func__,
+			   idc_ver);
+	} else {
+		idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
+		idc_ver &= 0xFF;
+		if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
+			ql4_printk(KERN_INFO, ha,
+				   "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
+				   __func__, QLA83XX_IDC_VER_MAJ_VALUE,
+				   idc_ver);
+			rval = QLA_ERROR;
+			goto exit_set_idc_ver;
+		}
+	}
+
+	/* Update IDC_MINOR_VERSION */
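+	/* Each function owns a 2-bit slot in the minor version register. */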
+	idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
+	idc_ver &= ~(0x03 << (ha->func_num * 2));
+	idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
+	qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+	return rval;
+}
+
+int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
+{
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+
+	if (test_bit(AF_INIT_DONE, &ha->flags))
+		goto exit_update_idc_reg;
+
+	ha->isp_ops->idc_lock(ha);
+	qla4_8xxx_set_drv_active(ha);
+
+	/*
+	 * If we are the first driver to load and
+	 * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
+	 */
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
+		if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
+			qla4_83xx_clear_idc_dontreset(ha);
+	}
+
+	if (is_qla8022(ha)) {
+		qla4_82xx_set_idc_ver(ha);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		rval = qla4_83xx_set_idc_ver(ha);
+		if (rval == QLA_ERROR)
+			qla4_8xxx_clear_drv_active(ha);
+	}
+
+	ha->isp_ops->idc_unlock(ha);
+
+exit_update_idc_reg:
+	return rval;
+}
+
+/**
+ * qla4_8xxx_device_state_handler - Adapter state machine
+ * @ha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
+{
+	uint32_t dev_state;
+	int rval = QLA_SUCCESS;
+	unsigned long dev_init_timeout;
+
+	rval = qla4_8xxx_update_idc_reg(ha);
+	if (rval == QLA_ERROR)
+		goto exit_state_handler;
+
+	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+			  dev_state, dev_state < MAX_STATES ?
+			  qdev_state[dev_state] : "Unknown"));
+
+	/* wait for 30 seconds for device to go ready */
+	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+
+	ha->isp_ops->idc_lock(ha);
+	while (1) {
+
+		if (time_after_eq(jiffies, dev_init_timeout)) {
+			ql4_printk(KERN_WARNING, ha,
+				   "%s: Device Init Failed 0x%x = %s\n",
+				   DRIVER_NAME,
+				   dev_state, dev_state < MAX_STATES ?
+				   qdev_state[dev_state] : "Unknown");
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_FAILED);
+		}
+
+		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+		ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+			   dev_state, dev_state < MAX_STATES ?
+			   qdev_state[dev_state] : "Unknown");
+
+		/* NOTE: Make sure idc unlocked upon exit of switch statement */
+		switch (dev_state) {
+		case QLA8XXX_DEV_READY:
+			goto exit;
+		case QLA8XXX_DEV_COLD:
+			rval = qla4_8xxx_device_bootstrap(ha);
+			goto exit;
+		case QLA8XXX_DEV_INITIALIZING:
+			ha->isp_ops->idc_unlock(ha);
+			msleep(1000);
+			ha->isp_ops->idc_lock(ha);
+			break;
+		case QLA8XXX_DEV_NEED_RESET:
+			/*
+			 * For ISP8324 and ISP8042, if NEED_RESET is set by any
+			 * driver, it should be honored, irrespective of
+			 * IDC_CTRL DONTRESET_BIT0
+			 */
+			if (is_qla8032(ha) || is_qla8042(ha)) {
+				qla4_83xx_need_reset_handler(ha);
+			} else if (is_qla8022(ha)) {
+				if (!ql4xdontresethba) {
+					qla4_82xx_need_reset_handler(ha);
+					/* Update timeout value after need
+					 * reset handler */
+					dev_init_timeout = jiffies +
+						(ha->nx_dev_init_timeout * HZ);
+				} else {
+					ha->isp_ops->idc_unlock(ha);
+					msleep(1000);
+					ha->isp_ops->idc_lock(ha);
+				}
+			}
+			break;
+		case QLA8XXX_DEV_NEED_QUIESCENT:
+			/* idc locked/unlocked in handler */
+			qla4_8xxx_need_qsnt_handler(ha);
+			break;
+		case QLA8XXX_DEV_QUIESCENT:
+			ha->isp_ops->idc_unlock(ha);
+			msleep(1000);
+			ha->isp_ops->idc_lock(ha);
+			break;
+		case QLA8XXX_DEV_FAILED:
+			ha->isp_ops->idc_unlock(ha);
+			qla4xxx_dead_adapter_cleanup(ha);
+			rval = QLA_ERROR;
+			ha->isp_ops->idc_lock(ha);
+			goto exit;
+		default:
+			ha->isp_ops->idc_unlock(ha);
+			qla4xxx_dead_adapter_cleanup(ha);
+			rval = QLA_ERROR;
+			ha->isp_ops->idc_lock(ha);
+			goto exit;
+		}
+	}
+exit:
+	ha->isp_ops->idc_unlock(ha);
+exit_state_handler:
+	return rval;
+}
+
+int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
+{
+	int retval;
+
+	/* clear the interrupt */
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		writel(0, &ha->qla4_83xx_reg->risc_intr);
+		readl(&ha->qla4_83xx_reg->risc_intr);
+	} else if (is_qla8022(ha)) {
+		writel(0, &ha->qla4_82xx_reg->host_int);
+		readl(&ha->qla4_82xx_reg->host_int);
+	}
+
+	retval = qla4_8xxx_device_state_handler(ha);
+
+	/* Initialize request and response queues. */
+	if (retval == QLA_SUCCESS)
+		qla4xxx_init_rings(ha);
+
+	if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
+		retval = qla4xxx_request_irqs(ha);
+
+	return retval;
+}
+
+/*****************************************************************************/
+/* Flash Manipulation Routines                                               */
+/*****************************************************************************/
+
+#define OPTROM_BURST_SIZE       0x1000
+#define OPTROM_BURST_DWORDS     (OPTROM_BURST_SIZE / 4)
+
+#define FARX_DATA_FLAG	BIT_31
+#define FARX_ACCESS_FLASH_CONF	0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA	0x7FF00000
+
+static inline uint32_t
+flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
+{
+	return hw->flash_conf_off | faddr;
+}
+
+static inline uint32_t
+flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
+{
+	return hw->flash_data_off | faddr;
+}
+
+static uint32_t *
+qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
+    uint32_t faddr, uint32_t length)
+{
+	uint32_t i;
+	uint32_t val;
+	int loops = 0;
+	while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+		udelay(100);
+		cond_resched();
+		loops++;
+	}
+	if (loops >= 50000) {
+		ql4_printk(KERN_WARNING, ha, "ROM lock failed\n");
+		return dwptr;
+	}
+
+	/* Dword reads to flash. */
+	for (i = 0; i < length/4; i++, faddr += 4) {
+		if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
+			ql4_printk(KERN_WARNING, ha,
+			    "Do ROM fast read failed\n");
+			goto done_read;
+		}
+		dwptr[i] = __constant_cpu_to_le32(val);
+	}
+
+done_read:
+	qla4_82xx_rom_unlock(ha);
+	return dwptr;
+}
+
+/**
+ * Address and length are byte addresses.
+ **/
+static uint8_t *
+qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+		uint32_t offset, uint32_t length)
+{
+	qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
+	return buf;
+}
+
+static int
+qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start)
+{
+	const char *loc, *locations[] = { "DEF", "PCI" };
+
+	/*
+	 * FLT-location structure resides after the last PCI region.
+	 */
+
+	/* Begin with sane defaults. */
+	loc = locations[0];
+	*start = FA_FLASH_LAYOUT_ADDR_82;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
+	return QLA_SUCCESS;
+}
+
+static void
+qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
+{
+	const char *loc, *locations[] = { "DEF", "FLT" };
+	uint16_t *wptr;
+	uint16_t cnt, chksum;
+	uint32_t start, status;
+	struct qla_flt_header *flt;
+	struct qla_flt_region *region;
+	struct ql82xx_hw_data *hw = &ha->hw;
+
+	hw->flt_region_flt = flt_addr;
+	wptr = (uint16_t *)ha->request_ring;
+	flt = (struct qla_flt_header *)ha->request_ring;
+	region = (struct qla_flt_region *)&flt[1];
+
+	if (is_qla8022(ha)) {
+		qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+					   flt_addr << 2, OPTROM_BURST_SIZE);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
+						  (uint8_t *)ha->request_ring,
+						  0x400);
+		if (status != QLA_SUCCESS)
+			goto no_flash_data;
+	}
+
+	if (*wptr == __constant_cpu_to_le16(0xffff))
+		goto no_flash_data;
+	if (flt->version != __constant_cpu_to_le16(1)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
+			"version=0x%x length=0x%x checksum=0x%x.\n",
+			le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+			le16_to_cpu(flt->checksum)));
+		goto no_flash_data;
+	}
+
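+	/* The FLT is covered by a 16-bit additive checksum: the header plus
+	 * payload words must sum to zero. */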
+	cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+	for (chksum = 0; cnt; cnt--)
+		chksum += le16_to_cpu(*wptr++);
+	if (chksum) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
+			"version=0x%x length=0x%x checksum=0x%x.\n",
+			le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+			chksum));
+		goto no_flash_data;
+	}
+
+	loc = locations[1];
+	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+	for ( ; cnt; cnt--, region++) {
+		/* Store addresses as DWORD offsets. */
+		start = le32_to_cpu(region->start) >> 2;
+
+		DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
+		    "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
+		    le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
+
+		switch (le32_to_cpu(region->code) & 0xff) {
+		case FLT_REG_FDT:
+			hw->flt_region_fdt = start;
+			break;
+		case FLT_REG_BOOT_CODE_82:
+			hw->flt_region_boot = start;
+			break;
+		case FLT_REG_FW_82:
+		case FLT_REG_FW_82_1:
+			hw->flt_region_fw = start;
+			break;
+		case FLT_REG_BOOTLOAD_82:
+			hw->flt_region_bootload = start;
+			break;
+		case FLT_REG_ISCSI_PARAM:
+			hw->flt_iscsi_param =  start;
+			break;
+		case FLT_REG_ISCSI_CHAP:
+			hw->flt_region_chap =  start;
+			hw->flt_chap_size =  le32_to_cpu(region->size);
+			break;
+		case FLT_REG_ISCSI_DDB:
+			hw->flt_region_ddb =  start;
+			hw->flt_ddb_size =  le32_to_cpu(region->size);
+			break;
+		}
+	}
+	goto done;
+
+no_flash_data:
+	/* Use hardcoded defaults. */
+	loc = locations[0];
+
+	hw->flt_region_fdt      = FA_FLASH_DESCR_ADDR_82;
+	hw->flt_region_boot     = FA_BOOT_CODE_ADDR_82;
+	hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
+	hw->flt_region_fw       = FA_RISC_CODE_ADDR_82;
+	hw->flt_region_chap	= FA_FLASH_ISCSI_CHAP >> 2;
+	hw->flt_chap_size	= FA_FLASH_CHAP_SIZE;
+	hw->flt_region_ddb	= FA_FLASH_ISCSI_DDB >> 2;
+	hw->flt_ddb_size	= FA_FLASH_DDB_SIZE;
+
+done:
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x  ddb_size=0x%x\n",
+			  loc, hw->flt_region_flt, hw->flt_region_fdt,
+			  hw->flt_region_boot, hw->flt_region_bootload,
+			  hw->flt_region_fw, hw->flt_region_chap,
+			  hw->flt_chap_size, hw->flt_region_ddb,
+			  hw->flt_ddb_size));
+}
+
+static void
+qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
+{
+#define FLASH_BLK_SIZE_4K       0x1000
+#define FLASH_BLK_SIZE_32K      0x8000
+#define FLASH_BLK_SIZE_64K      0x10000
+	const char *loc, *locations[] = { "MID", "FDT" };
+	uint16_t cnt, chksum;
+	uint16_t *wptr;
+	struct qla_fdt_layout *fdt;
+	uint16_t mid = 0;
+	uint16_t fid = 0;
+	struct ql82xx_hw_data *hw = &ha->hw;
+
+	hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+	hw->flash_data_off = FARX_ACCESS_FLASH_DATA;
+
+	wptr = (uint16_t *)ha->request_ring;
+	fdt = (struct qla_fdt_layout *)ha->request_ring;
+	qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+	    hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
+
+	if (*wptr == __constant_cpu_to_le16(0xffff))
+		goto no_flash_data;
+
+	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
+	    fdt->sig[3] != 'D')
+		goto no_flash_data;
+
+	for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
+	    cnt++)
+		chksum += le16_to_cpu(*wptr++);
+
+	if (chksum) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
+		    "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
+		    le16_to_cpu(fdt->version)));
+		goto no_flash_data;
+	}
+
+	loc = locations[1];
+	mid = le16_to_cpu(fdt->man_id);
+	fid = le16_to_cpu(fdt->id);
+	hw->fdt_wrt_disable = fdt->wrt_disable_bits;
+	hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd);
+	hw->fdt_block_size = le32_to_cpu(fdt->block_size);
+
+	if (fdt->unprotect_sec_cmd) {
+		hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 |
+		    fdt->unprotect_sec_cmd);
+		hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
+		    flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) :
+		    flash_conf_addr(hw, 0x0336);
+	}
+	goto done;
+
+no_flash_data:
+	loc = locations[0];
+	hw->fdt_block_size = FLASH_BLK_SIZE_64K;
+done:
+	DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+		"pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+		hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd,
+		hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable,
+		hw->fdt_block_size));
+}
+
+static void
+qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
+{
+#define QLA82XX_IDC_PARAM_ADDR      0x003e885c
+	uint32_t *wptr;
+
+	if (!is_qla8022(ha))
+		return;
+	wptr = (uint32_t *)ha->request_ring;
+	qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
+			QLA82XX_IDC_PARAM_ADDR , 8);
+
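+	/* An erased flash region reads back as all ones; fall back to the
+	 * compiled-in ROM defaults in that case. */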
+	if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
+		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
+	} else {
+		ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
+		ha->nx_reset_timeout = le32_to_cpu(*wptr);
+	}
+
+	DEBUG2(ql4_printk(KERN_DEBUG, ha,
+		"ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout));
+	DEBUG2(ql4_printk(KERN_DEBUG, ha,
+		"ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout));
+	return;
+}
+
+void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
+			      int in_count)
+{
+	int i;
+
+	/* Load all mailbox registers, except mailbox 0. */
+	for (i = 1; i < in_count; i++)
+		writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);
+
+	/* Wake up firmware */
+	writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
+	readl(&ha->qla4_82xx_reg->mailbox_in[0]);
+	writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
+	readl(&ha->qla4_82xx_reg->hint);
+}
+
+void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
+{
+	int intr_status;
+
+	intr_status = readl(&ha->qla4_82xx_reg->host_int);
+	if (intr_status & ISRX_82XX_RISC_INT) {
+		ha->mbox_status_count = out_count;
+		intr_status = readl(&ha->qla4_82xx_reg->host_status);
+		ha->isp_ops->interrupt_service_routine(ha, intr_status);
+
+		if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+		    (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
+			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
+					0xfbff);
+	}
+}
+
+int
+qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
+{
+	int ret;
+	uint32_t flt_addr;
+
+	ret = qla4_8xxx_find_flt_start(ha, &flt_addr);
+	if (ret != QLA_SUCCESS)
+		return ret;
+
+	qla4_8xxx_get_flt_info(ha, flt_addr);
+	if (is_qla8022(ha)) {
+		qla4_82xx_get_fdt_info(ha);
+		qla4_82xx_get_idc_param(ha);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		qla4_83xx_get_idc_param(ha);
+	}
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance
+ * @ha: pointer to host adapter structure.
+ *
+ * Remarks:
+ * For iSCSI, throws away all I/O and AENs into bit bucket, so they will
+ * not be available after successful return.  Driver must clean up any
+ * outstanding I/Os after calling this function.
+ **/
+int
+qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
+{
+	int status;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_STOP_FW;
+	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
+	    &mbox_cmd[0], &mbox_sts[0]);
+
+	DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no,
+	    __func__, status));
+	return status;
+}
+
+/**
+ * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
+ * @ha: pointer to host adapter structure.
+ **/
+int
+qla4_82xx_isp_reset(struct scsi_qla_host *ha)
+{
+	int rval;
+	uint32_t dev_state;
+
+	qla4_82xx_idc_lock(ha);
+	dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+	if (dev_state == QLA8XXX_DEV_READY) {
+		ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+		qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+		    QLA8XXX_DEV_NEED_RESET);
+		set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+	} else
+		ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
+
+	qla4_82xx_idc_unlock(ha);
+
+	rval = qla4_8xxx_device_state_handler(ha);
+
+	qla4_82xx_idc_lock(ha);
+	qla4_8xxx_clear_rst_ready(ha);
+	qla4_82xx_idc_unlock(ha);
+
+	if (rval == QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n");
+		clear_bit(AF_FW_RECOVERY, &ha->flags);
+	}
+
+	return rval;
+}
+
+/**
+ * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number
+ * @ha: pointer to host adapter structure.
+ *
+ **/
+int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct mbx_sys_info *sys_info;
+	dma_addr_t sys_info_dma;
+	int status = QLA_ERROR;
+
+	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+				      &sys_info_dma, GFP_KERNEL);
+	if (sys_info == NULL) {
+		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
+		    ha->host_no, __func__));
+		return status;
+	}
+
+	memset(sys_info, 0, sizeof(*sys_info));
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO;
+	mbox_cmd[1] = LSDW(sys_info_dma);
+	mbox_cmd[2] = MSDW(sys_info_dma);
+	mbox_cmd[4] = sizeof(*sys_info);
+
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0],
+	    &mbox_sts[0]) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n",
+		    ha->host_no, __func__));
+		goto exit_validate_mac82;
+	}
+
+	/* Make sure we receive the minimum required data to cache internally */
+	if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
+	    offsetof(struct mbx_sys_info, reserved)) {
+		DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
+		    " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
+		goto exit_validate_mac82;
+	}
+
+	/* Save M.A.C. address & serial_number */
+	ha->port_num = sys_info->port_num;
+	memcpy(ha->my_mac, &sys_info->mac_addr[0],
+	    min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
+	memcpy(ha->serial_number, &sys_info->serial_number,
+	    min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
+	memcpy(ha->model_name, &sys_info->board_id_str,
+	       min(sizeof(ha->model_name), sizeof(sys_info->board_id_str)));
+	ha->phy_port_cnt = sys_info->phys_port_cnt;
+	ha->phy_port_num = sys_info->port_num;
+	ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
+
+	DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
+	    ha->host_no, __func__, ha->my_mac, ha->serial_number));
+
+	status = QLA_SUCCESS;
+
+exit_validate_mac82:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
+			  sys_info_dma);
+	return status;
+}
+
+/* Interrupt handling helpers. */
+
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
+	mbox_cmd[1] = INTR_ENABLE;
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+		&mbox_sts[0]) != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+		    "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
+		    __func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+	return QLA_SUCCESS;
+}
+
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));
+
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
+	mbox_cmd[1] = INTR_DISABLE;
+	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
+	    &mbox_sts[0]) != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+			"%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
+			__func__, mbox_sts[0]));
+		return QLA_ERROR;
+	}
+
+	return QLA_SUCCESS;
+}
+
+void
+qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
+{
+	qla4_8xxx_intr_enable(ha);
+
+	spin_lock_irq(&ha->hardware_lock);
+	/* BIT 10 - reset */
+	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+	spin_unlock_irq(&ha->hardware_lock);
+	set_bit(AF_INTERRUPTS_ON, &ha->flags);
+}
+
+void
+qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
+{
+	if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
+		qla4_8xxx_intr_disable(ha);
+
+	spin_lock_irq(&ha->hardware_lock);
+	/* BIT 10 - set */
+	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+	spin_unlock_irq(&ha->hardware_lock);
+}
+
+int
+qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
+{
+	int ret;
+
+	ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
+			QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+	if (ret < 0) {
+		ql4_printk(KERN_WARNING, ha,
+		    "MSI-X: Failed to enable support -- %d/%d\n",
+		    QLA_MSIX_ENTRIES, ret);
+		return ret;
+	}
+
+	ret = request_irq(pci_irq_vector(ha->pdev, 0),
+			qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
+			ha);
+	if (ret)
+		goto out_free_vectors;
+
+	ret = request_irq(pci_irq_vector(ha->pdev, 1),
+			qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
+	if (ret)
+		goto out_free_default_irq;
+
+	return 0;
+
+out_free_default_irq:
+	free_irq(pci_irq_vector(ha->pdev, 0), ha);
+out_free_vectors:
+	pci_free_irq_vectors(ha->pdev);
+	return ret;
+}
+
+int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
+{
+	int status = QLA_SUCCESS;
+
+	/* Don't retry adapter initialization if IRQ allocation failed */
+	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
+			   __func__);
+		status = QLA_ERROR;
+		goto exit_init_adapter_failure;
+	}
+
+	/*
+	 * Since interrupts are registered in start_firmware for 8xxx,
+	 * release them here if initialize_adapter fails and retry
+	 * adapter initialization.
+	 */
+	qla4xxx_free_irqs(ha);
+
+exit_init_adapter_failure:
+	return status;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nx.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nx.h
new file mode 100644
index 0000000..337d9fc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_nx.h
@@ -0,0 +1,1032 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * the host will read them to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED		0xffff
+#define PHAN_INITIALIZE_COMPLETE	0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK		0xf00f
+#define PHAN_PEG_RCV_INITIALIZED	0xff01
+
+/* CRB related */
+#define QLA82XX_CRB_BASE		(QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X)			(QLA82XX_CRB_BASE+(X))
+#define CRB_CMDPEG_STATE		QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE		QLA82XX_REG(0x13c)
+#define CRB_DMA_SHIFT			QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE			QLA82XX_REG(0x1b4)
+#define CRB_CMDPEG_CHECK_RETRY_COUNT	60
+#define CRB_CMDPEG_CHECK_DELAY		500
+
+#define qla82xx_get_temp_val(x)		((x) >> 16)
+#define qla82xx_get_temp_state(x)	((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)	(((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+	QLA82XX_TEMP_NORMAL = 0x1,	/* Normal operating range */
+	QLA82XX_TEMP_WARN,	/* Sound alert, temperature getting high */
+	QLA82XX_TEMP_PANIC	/* Fatal error, hardware has shut down. */
+};
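+
+/*
+ * Worked example: a raw CRB_TEMP_STATE value of 0x002d0001 decodes via
+ * qla82xx_get_temp_val() to 45 (degrees C) and via qla82xx_get_temp_state()
+ * to 0x1, i.e. QLA82XX_TEMP_NORMAL.
+ */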
+
+#define CRB_NIU_XG_PAUSE_CTL_P0		0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1		0x8
+
+#define QLA82XX_HW_H0_CH_HUB_ADR	0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR	0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR	0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR	0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR	0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR	0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR	0x08
+
+/*  Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR	0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR	0x25
+
+/*  Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR	0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR	0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR	0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR	0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR	0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR	0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR	0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR	0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR	0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR	0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR	0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR	0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR	0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR	0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR	0x18
+
+/*  Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR	0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR	0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR	0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR	0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR	0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR	0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR   0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR	0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR	0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR	0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR	0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR	0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR    0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR    0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR    0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR    0x11
+
+/*  Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR	0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR	0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR	0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR	0x08
+
+/*  Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR	0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR	0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR	0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR	0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR	0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR	0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR	0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR	0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR	0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR	0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR	0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR	0x4b
+
+/*  Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR	0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR	0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR	0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR	0x43
+
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR	0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR	0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR	0x46
+
+/*  Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR	0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR	0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR	0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR	0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR	0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR	0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR	0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR	0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR	0x07
+
+/* This field defines PCI/X address bits [25:20] of agents on the CRB */
+#define QLA82XX_HW_PX_MAP_CRB_PH	0
+#define QLA82XX_HW_PX_MAP_CRB_PS	1
+#define QLA82XX_HW_PX_MAP_CRB_MN	2
+#define QLA82XX_HW_PX_MAP_CRB_MS	3
+#define QLA82XX_HW_PX_MAP_CRB_SRE	5
+#define QLA82XX_HW_PX_MAP_CRB_NIU	6
+#define QLA82XX_HW_PX_MAP_CRB_QMN	7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0	8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1	9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2	10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3	11
+#define QLA82XX_HW_PX_MAP_CRB_QMS	12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0	13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1	14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2	15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3	16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0	17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1	18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2	19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3	20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4	QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND	21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI	22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0	23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1	24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2	25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3	26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD	27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI	28
+#define QLA82XX_HW_PX_MAP_CRB_SN	29
+#define QLA82XX_HW_PX_MAP_CRB_EG	31
+#define QLA82XX_HW_PX_MAP_CRB_PH2	32
+#define QLA82XX_HW_PX_MAP_CRB_PS2	33
+#define QLA82XX_HW_PX_MAP_CRB_CAM	34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0	35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1	36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2	37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0	38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1	39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR	40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1	42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2	43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3	44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4	45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5	46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6	47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7	48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA	49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q	50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB    51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3	52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0	53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8	54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9	55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0	56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1	57
+#define QLA82XX_HW_PX_MAP_CRB_SMB	58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0	59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1	60
+#define QLA82XX_HW_PX_MAP_CRB_LPC	61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC	62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0	63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1	4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2	30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3	41
+
+/* This field defines CRB address bits [31:20] of the agents */
+
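+/*
+ * Each agent address is composed as (hub << 7) | agent. For example, the
+ * MN agent on hub 0: (0x05 << 7) | 0x15 = 0x295.
+ */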
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN	((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+					QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH	((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+					QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS	((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+					QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS	((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS	((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS	    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB	    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SMB_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU      ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0     ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1     ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_I2C1_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE      ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG       ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN      ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_CAS3_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM      ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN       ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q      ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB   ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC      ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+					    QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB	(QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE		(ROMUSB_GLB + 0x005c)
+#define QLA82XX_ROMUSB_GLB_STATUS		(ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET		(ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_ROM_ADDRESS		(ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA		(ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT		(ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT	(ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA		(ROMUSB_ROM + 0x0018)
+
+#define ROMUSB_ROM	(QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE	(ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_GLB_CAS_RST	(ROMUSB_GLB + 0x0038)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER		0x0d417340
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE	0x00100000    /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A)	(QLA82XX_PCI_CRBSPACE + \
+					(A)*QLA82XX_PCI_CRB_WINDOWSIZE)
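+
+/*
+ * Example: the NIU window, index QLA82XX_HW_PX_MAP_CRB_NIU (6), starts at
+ * QLA82XX_PCI_CRBSPACE + 6 * 0x00100000 = 0x06600000.
+ */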
+
+#define QLA82XX_CRB_C2C_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM	\
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU	\
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+/* HACK upon HACK upon HACK (for PCIE builds) */
+#define QLA82XX_CRB_PCIX_HOST \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE	QLA82XX_CRB_PCIX_MD
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+
+#define QLA82XX_CRB_PEG_MD_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+
+#define QLA82XX_CRB_MAX		QLA82XX_PCI_CRB_WINDOW(64)
+
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ */
+#define QLA8XXX_ADDR_DDR_NET		(0x0000000000000000ULL)
+#define QLA8XXX_ADDR_DDR_NET_MAX	(0x000000000fffffffULL)
+
+/*
+ * IMBUS address bit used to indicate a host address. This bit is
+ * eliminated by the PCIe BAR and BAR select before presentation
+ * over PCIe.
+ */
+/* host memory via IMBUS */
+#define QLA82XX_P2_ADDR_PCIE	(0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE	(0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX	(0x0000000FFFFFFFFFULL)
+#define QLA8XXX_ADDR_OCM0	(0x0000000200000000ULL)
+#define QLA8XXX_ADDR_OCM0_MAX	(0x00000002000fffffULL)
+#define QLA8XXX_ADDR_OCM1	(0x0000000200400000ULL)
+#define QLA8XXX_ADDR_OCM1_MAX	(0x00000002004fffffULL)
+#define QLA8XXX_ADDR_QDR_NET	(0x0000000300000000ULL)
+
+#define QLA82XX_P2_ADDR_QDR_NET_MAX	(0x00000003001fffffULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX	(0x0000000303ffffffULL)
+#define QLA8XXX_ADDR_QDR_NET_MAX	(0x0000000307ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE		(unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB		(unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM		(unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX		(unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET		(unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET		(unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX		(unsigned long)0x043fffff
+
+/*  PCI Windowing for DDR regions.  */
+#define QLA8XXX_ADDR_IN_RANGE(addr, low, high)            \
+	(((addr) <= (high)) && ((addr) >= (low)))
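+
+/*
+ * e.g. QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+ * QLA8XXX_ADDR_DDR_NET_MAX) tests whether addr falls within DDR network
+ * memory (0x0 .. 0x0fffffff).
+ */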
+
+/*
+ *   Register offsets for MN
+ */
+#define MIU_CONTROL			(0x000)
+#define MIU_TAG				(0x004)
+#define MIU_TEST_AGT_CTRL		(0x090)
+#define MIU_TEST_AGT_ADDR_LO		(0x094)
+#define MIU_TEST_AGT_ADDR_HI		(0x098)
+#define MIU_TEST_AGT_WRDATA_LO		(0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI		(0x0a4)
+#define MIU_TEST_AGT_WRDATA(i)		(0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO		(0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI		(0x0ac)
+#define MIU_TEST_AGT_RDDATA(i)		(0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK		0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off)	(0)
+
+/* MIU_TEST_AGT_CTRL flags; these work for the SIU as well */
+#define MIU_TA_CTL_START	1
+#define MIU_TA_CTL_ENABLE	2
+#define MIU_TA_CTL_WRITE	4
+#define MIU_TA_CTL_BUSY		8
+
+#define MIU_TA_CTL_WRITE_ENABLE		(MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START		(MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\
+					 MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE		(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/*CAM RAM */
+# define QLA82XX_CAM_RAM_BASE	(QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg)	(QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PORT_MODE_ADDR		(QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1	(QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2	(QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER	(QLA82XX_CAM_RAM(0xb0))
+#define QLA82XX_CAM_RAM_DB1		(QLA82XX_CAM_RAM(0x1b0))
+#define QLA82XX_CAM_RAM_DB2		(QLA82XX_CAM_RAM(0x1b4))
+
+#define HALT_STATUS_UNRECOVERABLE	0x80000000
+#define HALT_STATUS_RECOVERABLE		0x40000000
+
+#define QLA82XX_ROM_LOCK_ID		(QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID		(QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR	(QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR	(QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB		(QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg)		(QLA82XX_CRB_PCIE + (reg))
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE		(QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE		(QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DRV_STATE		(QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH		(QLA82XX_CAM_RAM(0x148))
+#define QLA82XX_CRB_DEV_PART_INFO	(QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION	(QLA82XX_CAM_RAM(0x174))
+
+enum qla_regs {
+	QLA8XXX_PEG_HALT_STATUS1 = 0,
+	QLA8XXX_PEG_HALT_STATUS2,
+	QLA8XXX_PEG_ALIVE_COUNTER,
+	QLA8XXX_CRB_DRV_ACTIVE,
+	QLA8XXX_CRB_DEV_STATE,
+	QLA8XXX_CRB_DRV_STATE,
+	QLA8XXX_CRB_DRV_SCRATCH,
+	QLA8XXX_CRB_DEV_PART_INFO,
+	QLA8XXX_CRB_DRV_IDC_VERSION,
+	QLA8XXX_FW_VERSION_MAJOR,
+	QLA8XXX_FW_VERSION_MINOR,
+	QLA8XXX_FW_VERSION_SUB,
+	QLA8XXX_CRB_CMDPEG_STATE,
+	QLA8XXX_CRB_TEMP_STATE,
+};
+
+static const uint32_t qla4_82xx_reg_tbl[] = {
+	QLA82XX_PEG_HALT_STATUS1,
+	QLA82XX_PEG_HALT_STATUS2,
+	QLA82XX_PEG_ALIVE_COUNTER,
+	QLA82XX_CRB_DRV_ACTIVE,
+	QLA82XX_CRB_DEV_STATE,
+	QLA82XX_CRB_DRV_STATE,
+	QLA82XX_CRB_DRV_SCRATCH,
+	QLA82XX_CRB_DEV_PART_INFO,
+	QLA82XX_CRB_DRV_IDC_VERSION,
+	QLA82XX_FW_VERSION_MAJOR,
+	QLA82XX_FW_VERSION_MINOR,
+	QLA82XX_FW_VERSION_SUB,
+	CRB_CMDPEG_STATE,
+	CRB_TEMP_STATE,
+};
+
+/* Every driver should use these device states */
+#define QLA8XXX_DEV_COLD		1
+#define QLA8XXX_DEV_INITIALIZING	2
+#define QLA8XXX_DEV_READY		3
+#define QLA8XXX_DEV_NEED_RESET		4
+#define QLA8XXX_DEV_NEED_QUIESCENT	5
+#define QLA8XXX_DEV_FAILED		6
+#define QLA8XXX_DEV_QUIESCENT		7
+#define MAX_STATES			8 /* Increment if new state added */
+
+#define QLA82XX_IDC_VERSION		0x1
+#define ROM_DEV_INIT_TIMEOUT		30
+#define ROM_DRV_RESET_ACK_TIMEOUT	10
+
+#define PCIE_SETUP_FUNCTION		(0x12040)
+#define PCIE_SETUP_FUNCTION2		(0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg)	(QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg)	(QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK		(0x1c010)  /* Flash lock   */
+#define PCIE_SEM2_UNLOCK	(0x1c014)  /* Flash unlock */
+#define PCIE_SEM5_LOCK		(0x1c028)  /* Coexistence lock   */
+#define PCIE_SEM5_UNLOCK	(0x1c02c)  /* Coexistence unlock */
+#define PCIE_SEM7_LOCK		(0x1c038)  /* crb win lock */
+#define PCIE_SEM7_UNLOCK	(0x1c03c)  /* crbwin unlock*/
+
+/*
+ * MSI-X table and control register defines.
+ */
+#define QLA82XX_MSIX_TBL_SPACE		8192
+#define QLA82XX_PCI_REG_MSIX_TBL	0x44
+#define QLA82XX_PCI_MSIX_CONTROL	0x40
+
+struct crb_128M_2M_sub_block_map {
+	unsigned valid;
+	unsigned start_128M;
+	unsigned end_128M;
+	unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+	struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
+struct crb_addr_pair {
+	long addr;
+	long data;
+};
+
+#define ADDR_ERROR	((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK	1000
+#define QLA82XX_FWERROR_CODE(code)	((code >> 8) & 0x1fffff)
+
+/***************************************************************************
+ *		PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS	(0x10118)
+#define PCIX_TARGET_STATUS_F1	(0x10160)
+#define PCIX_TARGET_STATUS_F2	(0x10164)
+#define PCIX_TARGET_STATUS_F3	(0x10168)
+#define PCIX_TARGET_STATUS_F4	(0x10360)
+#define PCIX_TARGET_STATUS_F5	(0x10364)
+#define PCIX_TARGET_STATUS_F6	(0x10368)
+#define PCIX_TARGET_STATUS_F7	(0x1036c)
+
+#define PCIX_TARGET_MASK	(0x10128)
+#define PCIX_TARGET_MASK_F1	(0x10170)
+#define PCIX_TARGET_MASK_F2	(0x10174)
+#define PCIX_TARGET_MASK_F3	(0x10178)
+#define PCIX_TARGET_MASK_F4	(0x10370)
+#define PCIX_TARGET_MASK_F5	(0x10374)
+#define PCIX_TARGET_MASK_F6	(0x10378)
+#define PCIX_TARGET_MASK_F7	(0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0		(0x13000)
+#define PCIX_MSI_F1		(0x13004)
+#define PCIX_MSI_F2		(0x13008)
+#define PCIX_MSI_F3		(0x1300c)
+#define PCIX_MSI_F4		(0x13010)
+#define PCIX_MSI_F5		(0x13014)
+#define PCIX_MSI_F6		(0x13018)
+#define PCIX_MSI_F7		(0x1301c)
+#define PCIX_MSI_F(FUNC)	(0x13000 + ((FUNC) * 4))
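+/* e.g. PCIX_MSI_F(2) == 0x13000 + 2 * 4 == 0x13008 == PCIX_MSI_F2 */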
+
+/* Interrupt vector and mask registers. */
+#define PCIX_INT_VECTOR		(0x10100)
+#define PCIX_INT_MASK		(0x10104)
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC		(0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR			(QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK			(QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG		(QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define	ISR_MSI_INT_TRIGGER(FUNC)	(QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define	ISR_IS_LEGACY_INTR_IDLE(VAL)		(((VAL) & 0x300) == 0)
+#define	ISR_IS_LEGACY_INTR_TRIGGERED(VAL)	(((VAL) & 0x300) == 0x200)
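+/*
+ * Bits [9:8] of the interrupt state register encode the legacy interrupt
+ * state: 0x000 = idle, 0x200 = triggered.
+ */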
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define	PCIX_INT_VECTOR_BIT_F0	0x0080
+#define	PCIX_INT_VECTOR_BIT_F1	0x0100
+#define	PCIX_INT_VECTOR_BIT_F2	0x0200
+#define	PCIX_INT_VECTOR_BIT_F3	0x0400
+#define	PCIX_INT_VECTOR_BIT_F4	0x0800
+#define	PCIX_INT_VECTOR_BIT_F5	0x1000
+#define	PCIX_INT_VECTOR_BIT_F6	0x2000
+#define	PCIX_INT_VECTOR_BIT_F7	0x4000
+
+/* struct qla4_8xxx_legacy_intr_set defined in ql4_def.h */
+
+#define QLA82XX_LEGACY_INTR_CONFIG                                      \
+{                                                                       \
+	{                                                               \
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F0,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS,          \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK,            \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(0) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F1,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F1,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F1,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(1) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F2,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F2,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F2,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(2) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F3,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F3,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F3,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(3) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F4,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F4,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F4,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(4) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F5,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F5,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F5,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(5) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F6,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F6,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F6,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(6) },       \
+									\
+	{								\
+		.int_vec_bit    =	PCIX_INT_VECTOR_BIT_F7,         \
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F7,       \
+		.tgt_mask_reg   =	ISR_INT_TARGET_MASK_F7,         \
+		.pci_int_reg    =	ISR_MSI_INT_TRIGGER(7) },       \
+}
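+
+/*
+ * This initializer fills one qla4_8xxx_legacy_intr_set per PCI function
+ * (F0-F7); ql4_os.c copies it into the legacy_intr[] table, which is
+ * indexed by the adapter's function number.
+ */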
+
+/* Magic number to let user know flash is programmed */
+#define	QLA82XX_BDINFO_MAGIC	0x12345678
+#define FW_SIZE_OFFSET		(0x3e840c)
+
+/* QLA82XX additions */
+#define MIU_TEST_AGT_WRDATA_UPPER_LO	(0x0b0)
+#define	MIU_TEST_AGT_WRDATA_UPPER_HI	(0x0b4)
+
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA8XXX_RDNOP	0
+#define QLA8XXX_RDCRB	1
+#define QLA8XXX_RDMUX	2
+#define QLA8XXX_QUEUE	3
+#define QLA8XXX_BOARD	4
+#define QLA8XXX_RDOCM	6
+#define QLA8XXX_PREGS	7
+#define QLA8XXX_L1DTG	8
+#define QLA8XXX_L1ITG	9
+#define QLA8XXX_L1DAT	11
+#define QLA8XXX_L1INS	12
+#define QLA8XXX_L2DTG	21
+#define QLA8XXX_L2ITG	22
+#define QLA8XXX_L2DAT	23
+#define QLA8XXX_L2INS	24
+#define QLA83XX_POLLRD	35
+#define QLA83XX_RDMUX2	36
+#define QLA83XX_POLLRDMWR  37
+#define QLA8044_RDDFE	38
+#define QLA8044_RDMDIO	39
+#define QLA8044_POLLWR	40
+#define QLA8XXX_RDROM	71
+#define QLA8XXX_RDMEM	72
+#define QLA8XXX_CNTRL	98
+#define QLA83XX_TLHDR	99
+#define QLA8XXX_RDEND	255
+
+/*
+ * Opcodes for control entries.
+ * These flags are bit fields.
+ */
+#define QLA8XXX_DBG_OPCODE_WR		0x01
+#define QLA8XXX_DBG_OPCODE_RW		0x02
+#define QLA8XXX_DBG_OPCODE_AND		0x04
+#define QLA8XXX_DBG_OPCODE_OR		0x08
+#define QLA8XXX_DBG_OPCODE_POLL		0x10
+#define QLA8XXX_DBG_OPCODE_RDSTATE	0x20
+#define QLA8XXX_DBG_OPCODE_WRSTATE	0x40
+#define QLA8XXX_DBG_OPCODE_MDSTATE	0x80
+
+/* Driver Flags */
+#define QLA8XXX_DBG_SKIPPED_FLAG	0x80 /* driver skipped this entry  */
+#define QLA8XXX_DBG_SIZE_ERR_FLAG	0x40 /* Entry vs Capture size
+					      * mismatch */
+
+/*
+ * driver_code is for the driver to write some info about the entry;
+ * it is currently not used.
+ */
+struct qla8xxx_minidump_entry_hdr {
+	uint32_t entry_type;
+	uint32_t entry_size;
+	uint32_t entry_capture_size;
+	struct {
+		uint8_t entry_capture_mask;
+		uint8_t entry_code;
+		uint8_t driver_code;
+		uint8_t driver_flags;
+	} d_ctrl;
+};
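+
+/*
+ * Illustrative only: a template walker typically steps entry-by-entry
+ * using entry_size, e.g.
+ *	entry = (struct qla8xxx_minidump_entry_hdr *)
+ *			((uint8_t *)entry + entry->entry_size);
+ */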
+
+/*  Read CRB entry header */
+struct qla8xxx_minidump_entry_crb {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t addr;
+	struct {
+		uint8_t addr_stride;
+		uint8_t state_index_a;
+		uint16_t poll_timeout;
+	} crb_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+
+	struct {
+		uint8_t opcode;
+		uint8_t state_index_v;
+		uint8_t shl;
+		uint8_t shr;
+	} crb_ctrl;
+
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t value_3;
+};
+
+struct qla8xxx_minidump_entry_cache {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t tag_reg_addr;
+	struct {
+		uint16_t tag_value_stride;
+		uint16_t init_tag_value;
+	} addr_ctrl;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t control_addr;
+	struct {
+		uint16_t write_value;
+		uint8_t poll_mask;
+		uint8_t poll_wait;
+	} cache_ctrl;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_1;
+	} read_ctrl;
+};
+
+/* Read OCM */
+struct qla8xxx_minidump_entry_rdocm {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t rsvd_0;
+	uint32_t rsvd_1;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_2;
+	uint32_t rsvd_3;
+	uint32_t read_addr;
+	uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla8xxx_minidump_entry_rdmem {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla8xxx_minidump_entry_rdrom {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla8xxx_minidump_entry_mux {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t rsvd_0;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t select_value;
+	uint32_t select_value_stride;
+	uint32_t read_addr;
+	uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla8xxx_minidump_entry_queue {
+	struct qla8xxx_minidump_entry_hdr h;
+	uint32_t select_addr;
+	struct {
+		uint16_t queue_id_stride;
+		uint16_t rsvd_0;
+	} q_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_1;
+	uint32_t rsvd_2;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_3;
+	} rd_strd;
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE	0x129
+#define RQST_TMPLT_SIZE				0x0
+#define RQST_TMPLT				0x1
+#define MD_DIRECT_ROM_WINDOW			0x42110030
+#define MD_DIRECT_ROM_READ_BASE			0x42150000
+#define MD_MIU_TEST_AGT_CTRL			0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO			0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI			0x41000098
+
+#define MD_MIU_TEST_AGT_WRDATA_LO		0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI		0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO		0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI		0x410000B4
+
+#define MD_MIU_TEST_AGT_RDDATA_LO		0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI		0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO		0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI		0x410000BC
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+				0x410000AC, 0x410000B8, 0x410000BC };
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_os.c b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_os.c
new file mode 100644
index 0000000..fb3abaf
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_os.c
@@ -0,0 +1,9952 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+
+#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+#include "ql4_83xx.h"
+
+/*
+ * Driver version
+ */
+static char qla4xxx_version_str[40];
+
+/*
+ * SRB allocation cache
+ */
+static struct kmem_cache *srb_cachep;
+
+/*
+ * Module parameter information and variables
+ */
+static int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+		 " Set to disable exporting boot targets to sysfs.\n"
+		 "\t\t  0 - Export boot targets\n"
+		 "\t\t  1 - Do not export boot targets (Default)");
+
+int ql4xdontresethba;
+module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdontresethba,
+		 " Don't reset the HBA for driver recovery.\n"
+		 "\t\t  0 - It will reset HBA (Default)\n"
+		 "\t\t  1 - It will NOT reset HBA");
+
+int ql4xextended_error_logging;
+module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xextended_error_logging,
+		 " Option to enable extended error logging.\n"
+		 "\t\t  0 - no logging (Default)\n"
+		 "\t\t  2 - debug logging");
+
+int ql4xenablemsix = 1;
+module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemsix,
+		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
+		 "\t\t  0 = enable INTx interrupt mechanism.\n"
+		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
+		 "\t\t  2 = enable MSI interrupt mechanism.");
+
+#define QL4_DEF_QDEPTH 32
+static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
+module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xmaxqdepth,
+		 " Maximum queue depth to report for target devices.\n"
+		 "\t\t  Default: 32.");
+
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+		 " Enable or disable dynamic tracking and adjustment of\n"
+		 "\t\t scsi device queue depth.\n"
+		 "\t\t  0 - Disable.\n"
+		 "\t\t  1 - Enable. (Default)");
+
+static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
+module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xsess_recovery_tmo,
+		" Target Session Recovery Timeout.\n"
+		"\t\t  Default: 120 sec.");
+
+int ql4xmdcapmask = 0;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+		 " Set the Minidump driver capture mask level.\n"
+		 "\t\t  Default is 0 (firmware default capture mask)\n"
+		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+		 " Set to enable minidump.\n"
+		 "\t\t  0 - disable minidump\n"
+		 "\t\t  1 - enable minidump (Default)");
+
+static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
+/*
+ * SCSI host template entry points
+ */
+static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
+
+/*
+ * iSCSI template entry points
+ */
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+				     enum iscsi_param param, char *buf);
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
+				  enum iscsi_param param, char *buf);
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+				  enum iscsi_host_param param, char *buf);
+static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
+				   uint32_t len);
+static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
+				   enum iscsi_param_type param_type,
+				   int param, char *buf);
+static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
+static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
+						 struct sockaddr *dst_addr,
+						 int non_blocking);
+static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
+static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
+static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
+				enum iscsi_param param, char *buf);
+static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
+static struct iscsi_cls_conn *
+qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
+static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+			     struct iscsi_cls_conn *cls_conn,
+			     uint64_t transport_fd, int is_leading);
+static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
+static struct iscsi_cls_session *
+qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+			uint16_t qdepth, uint32_t initial_cmdsn);
+static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
+static void qla4xxx_task_work(struct work_struct *wdata);
+static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
+static int qla4xxx_task_xmit(struct iscsi_task *);
+static void qla4xxx_task_cleanup(struct iscsi_task *);
+static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				   struct iscsi_stats *stats);
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+			     uint32_t iface_type, uint32_t payload_size,
+			     uint32_t pid, struct sockaddr *dst_addr);
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+				 uint32_t *num_entries, char *buf);
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void  *data,
+				  int len);
+static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
+
+/*
+ * SCSI host template entry points
+ */
+static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
+static int qla4xxx_slave_alloc(struct scsi_device *device);
+static umode_t qla4_attr_is_visible(int param_type, int param);
+static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+
+/*
+ * iSCSI Flash DDB sysfs entry points
+ */
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+			    struct iscsi_bus_flash_conn *fnode_conn,
+			    void *data, int len);
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+			    int param, char *buf);
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+				 int len);
+static int
+qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+				   struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
+
+static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
+    QLA82XX_LEGACY_INTR_CONFIG;
+
+static struct scsi_host_template qla4xxx_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= DRIVER_NAME,
+	.proc_name		= DRIVER_NAME,
+	.queuecommand		= qla4xxx_queuecommand,
+
+	.eh_abort_handler	= qla4xxx_eh_abort,
+	.eh_device_reset_handler = qla4xxx_eh_device_reset,
+	.eh_target_reset_handler = qla4xxx_eh_target_reset,
+	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
+	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
+
+	.slave_alloc		= qla4xxx_slave_alloc,
+	.change_queue_depth	= scsi_change_queue_depth,
+
+	.this_id		= -1,
+	.cmd_per_lun		= 3,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= SG_ALL,
+
+	.max_sectors		= 0xFFFF,
+	.shost_attrs		= qla4xxx_host_attrs,
+	.host_reset		= qla4xxx_host_reset,
+	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
+};
+
+static struct iscsi_transport qla4xxx_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= DRIVER_NAME,
+	.caps			= CAP_TEXT_NEGO |
+				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
+				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
+				  CAP_MULTI_R2T,
+	.attr_is_visible	= qla4_attr_is_visible,
+	.create_session         = qla4xxx_session_create,
+	.destroy_session        = qla4xxx_session_destroy,
+	.start_conn             = qla4xxx_conn_start,
+	.create_conn            = qla4xxx_conn_create,
+	.bind_conn              = qla4xxx_conn_bind,
+	.stop_conn              = iscsi_conn_stop,
+	.destroy_conn           = qla4xxx_conn_destroy,
+	.set_param              = iscsi_set_param,
+	.get_conn_param		= qla4xxx_conn_get_param,
+	.get_session_param	= qla4xxx_session_get_param,
+	.get_ep_param           = qla4xxx_get_ep_param,
+	.ep_connect		= qla4xxx_ep_connect,
+	.ep_poll		= qla4xxx_ep_poll,
+	.ep_disconnect		= qla4xxx_ep_disconnect,
+	.get_stats		= qla4xxx_conn_get_stats,
+	.send_pdu		= iscsi_conn_send_pdu,
+	.xmit_task		= qla4xxx_task_xmit,
+	.cleanup_task		= qla4xxx_task_cleanup,
+	.alloc_pdu		= qla4xxx_alloc_pdu,
+
+	.get_host_param		= qla4xxx_host_get_param,
+	.set_iface_param	= qla4xxx_iface_set_param,
+	.get_iface_param	= qla4xxx_get_iface_param,
+	.bsg_request		= qla4xxx_bsg_request,
+	.send_ping		= qla4xxx_send_ping,
+	.get_chap		= qla4xxx_get_chap_list,
+	.delete_chap		= qla4xxx_delete_chap,
+	.set_chap		= qla4xxx_set_chap_entry,
+	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
+	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
+	.new_flashnode		= qla4xxx_sysfs_ddb_add,
+	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
+	.login_flashnode	= qla4xxx_sysfs_ddb_login,
+	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
+	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
+	.get_host_stats		= qla4xxx_get_host_stats,
+};
+
+static struct scsi_transport_template *qla4xxx_scsi_transport;
+
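+/*
+ * Read back a device register to detect PCI disconnect: a surprise-removed
+ * device typically reads back as QL4_ISP_REG_DISCONNECT (all ones).
+ */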
+static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+{
+	u32 reg_val = 0;
+	int rval = QLA_SUCCESS;
+
+	if (is_qla8022(ha))
+		reg_val = readl(&ha->qla4_82xx_reg->host_status);
+	else if (is_qla8032(ha) || is_qla8042(ha))
+		reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+	else
+		reg_val = readw(&ha->reg->ctrl_status);
+
+	if (reg_val == QL4_ISP_REG_DISCONNECT)
+		rval = QLA_ERROR;
+
+	return rval;
+}
+
+static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
+			     uint32_t iface_type, uint32_t payload_size,
+			     uint32_t pid, struct sockaddr *dst_addr)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+	uint32_t options = 0;
+	uint8_t ipaddr[IPv6_ADDR_LEN];
+	int rval;
+
+	memset(ipaddr, 0, IPv6_ADDR_LEN);
+	/* IPv4 to IPv4 */
+	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
+	    (dst_addr->sa_family == AF_INET)) {
+		addr = (struct sockaddr_in *)dst_addr;
+		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
+				  "dest: %pI4\n", __func__,
+				  &ha->ip_config.ip_address, ipaddr));
+		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
+					 ipaddr);
+		if (rval)
+			rval = -EINVAL;
+	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
+		   (dst_addr->sa_family == AF_INET6)) {
+		/* IPv6 to IPv6 */
+		addr6 = (struct sockaddr_in6 *)dst_addr;
+		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
+
+		options |= PING_IPV6_PROTOCOL_ENABLE;
+
+		/* Ping using LinkLocal address */
+		if ((iface_num == 0) || (iface_num == 1)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
+					  "src: %pI6 dest: %pI6\n", __func__,
+					  &ha->ip_config.ipv6_link_local_addr,
+					  ipaddr));
+			options |= PING_IPV6_LINKLOCAL_ADDR;
+			rval = qla4xxx_ping_iocb(ha, options, payload_size,
+						 pid, ipaddr);
+		} else {
+			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
+				   "not supported\n", __func__, iface_num);
+			rval = -ENOSYS;
+			goto exit_send_ping;
+		}
+
+		/*
+		 * If ping using LinkLocal address fails, try ping using
+		 * IPv6 address
+		 */
+		if (rval != QLA_SUCCESS) {
+			options &= ~PING_IPV6_LINKLOCAL_ADDR;
+			if (iface_num == 0) {
+				options |= PING_IPV6_ADDR0;
+				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+						  "Ping src: %pI6 "
+						  "dest: %pI6\n", __func__,
+						  &ha->ip_config.ipv6_addr0,
+						  ipaddr));
+			} else if (iface_num == 1) {
+				options |= PING_IPV6_ADDR1;
+				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
+						  "Ping src: %pI6 "
+						  "dest: %pI6\n", __func__,
+						  &ha->ip_config.ipv6_addr1,
+						  ipaddr));
+			}
+			rval = qla4xxx_ping_iocb(ha, options, payload_size,
+						 pid, ipaddr);
+			if (rval)
+				rval = -EINVAL;
+		}
+	} else
+		rval = -ENOSYS;
+exit_send_ping:
+	return rval;
+}
+
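+/*
+ * Report sysfs visibility for each iSCSI transport attribute: S_IRUGO for
+ * parameters this driver supports, 0 to hide the rest.
+ */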
+static umode_t qla4_attr_is_visible(int param_type, int param)
+{
+	switch (param_type) {
+	case ISCSI_HOST_PARAM:
+		switch (param) {
+		case ISCSI_HOST_PARAM_HWADDRESS:
+		case ISCSI_HOST_PARAM_IPADDRESS:
+		case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		case ISCSI_HOST_PARAM_PORT_STATE:
+		case ISCSI_HOST_PARAM_PORT_SPEED:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	case ISCSI_PARAM:
+		switch (param) {
+		case ISCSI_PARAM_PERSISTENT_ADDRESS:
+		case ISCSI_PARAM_PERSISTENT_PORT:
+		case ISCSI_PARAM_CONN_ADDRESS:
+		case ISCSI_PARAM_CONN_PORT:
+		case ISCSI_PARAM_TARGET_NAME:
+		case ISCSI_PARAM_TPGT:
+		case ISCSI_PARAM_TARGET_ALIAS:
+		case ISCSI_PARAM_MAX_BURST:
+		case ISCSI_PARAM_MAX_R2T:
+		case ISCSI_PARAM_FIRST_BURST:
+		case ISCSI_PARAM_MAX_RECV_DLENGTH:
+		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+		case ISCSI_PARAM_IFACE_NAME:
+		case ISCSI_PARAM_CHAP_OUT_IDX:
+		case ISCSI_PARAM_CHAP_IN_IDX:
+		case ISCSI_PARAM_USERNAME:
+		case ISCSI_PARAM_PASSWORD:
+		case ISCSI_PARAM_USERNAME_IN:
+		case ISCSI_PARAM_PASSWORD_IN:
+		case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+		case ISCSI_PARAM_DISCOVERY_SESS:
+		case ISCSI_PARAM_PORTAL_TYPE:
+		case ISCSI_PARAM_CHAP_AUTH_EN:
+		case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+		case ISCSI_PARAM_BIDI_CHAP_EN:
+		case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+		case ISCSI_PARAM_DEF_TIME2WAIT:
+		case ISCSI_PARAM_DEF_TIME2RETAIN:
+		case ISCSI_PARAM_HDRDGST_EN:
+		case ISCSI_PARAM_DATADGST_EN:
+		case ISCSI_PARAM_INITIAL_R2T_EN:
+		case ISCSI_PARAM_IMM_DATA_EN:
+		case ISCSI_PARAM_PDU_INORDER_EN:
+		case ISCSI_PARAM_DATASEQ_INORDER_EN:
+		case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+		case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+		case ISCSI_PARAM_TCP_WSF_DISABLE:
+		case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+		case ISCSI_PARAM_TCP_TIMER_SCALE:
+		case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+		case ISCSI_PARAM_TCP_XMIT_WSF:
+		case ISCSI_PARAM_TCP_RECV_WSF:
+		case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+		case ISCSI_PARAM_IPV4_TOS:
+		case ISCSI_PARAM_IPV6_TC:
+		case ISCSI_PARAM_IPV6_FLOW_LABEL:
+		case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+		case ISCSI_PARAM_KEEPALIVE_TMO:
+		case ISCSI_PARAM_LOCAL_PORT:
+		case ISCSI_PARAM_ISID:
+		case ISCSI_PARAM_TSID:
+		case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+		case ISCSI_PARAM_ERL:
+		case ISCSI_PARAM_STATSN:
+		case ISCSI_PARAM_EXP_STATSN:
+		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+		case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+		case ISCSI_PARAM_LOCAL_IPADDR:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	case ISCSI_NET_PARAM:
+		switch (param) {
+		case ISCSI_NET_PARAM_IPV4_ADDR:
+		case ISCSI_NET_PARAM_IPV4_SUBNET:
+		case ISCSI_NET_PARAM_IPV4_GW:
+		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		case ISCSI_NET_PARAM_IFACE_ENABLE:
+		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+		case ISCSI_NET_PARAM_IPV6_ADDR:
+		case ISCSI_NET_PARAM_IPV6_ROUTER:
+		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+		case ISCSI_NET_PARAM_VLAN_ID:
+		case ISCSI_NET_PARAM_VLAN_PRIORITY:
+		case ISCSI_NET_PARAM_VLAN_ENABLED:
+		case ISCSI_NET_PARAM_MTU:
+		case ISCSI_NET_PARAM_PORT:
+		case ISCSI_NET_PARAM_IPADDR_STATE:
+		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
+		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
+		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+		case ISCSI_NET_PARAM_TCP_WSF:
+		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+		case ISCSI_NET_PARAM_CACHE_ID:
+		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+		case ISCSI_NET_PARAM_IPV4_TOS_EN:
+		case ISCSI_NET_PARAM_IPV4_TOS:
+		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+		case ISCSI_NET_PARAM_REDIRECT_EN:
+		case ISCSI_NET_PARAM_IPV4_TTL:
+		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+		case ISCSI_NET_PARAM_IPV6_MLD_EN:
+		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	case ISCSI_IFACE_PARAM:
+		switch (param) {
+		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+		case ISCSI_IFACE_PARAM_HDRDGST_EN:
+		case ISCSI_IFACE_PARAM_DATADGST_EN:
+		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+		case ISCSI_IFACE_PARAM_ERL:
+		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+		case ISCSI_IFACE_PARAM_FIRST_BURST:
+		case ISCSI_IFACE_PARAM_MAX_R2T:
+		case ISCSI_IFACE_PARAM_MAX_BURST:
+		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	case ISCSI_FLASHNODE_PARAM:
+		switch (param) {
+		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+		case ISCSI_FLASHNODE_PORTAL_TYPE:
+		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+		case ISCSI_FLASHNODE_DISCOVERY_SESS:
+		case ISCSI_FLASHNODE_ENTRY_EN:
+		case ISCSI_FLASHNODE_HDR_DGST_EN:
+		case ISCSI_FLASHNODE_DATA_DGST_EN:
+		case ISCSI_FLASHNODE_IMM_DATA_EN:
+		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+		case ISCSI_FLASHNODE_DATASEQ_INORDER:
+		case ISCSI_FLASHNODE_PDU_INORDER:
+		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+		case ISCSI_FLASHNODE_SNACK_REQ_EN:
+		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+		case ISCSI_FLASHNODE_ERL:
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+		case ISCSI_FLASHNODE_FIRST_BURST:
+		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+		case ISCSI_FLASHNODE_MAX_R2T:
+		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+		case ISCSI_FLASHNODE_ISID:
+		case ISCSI_FLASHNODE_TSID:
+		case ISCSI_FLASHNODE_PORT:
+		case ISCSI_FLASHNODE_MAX_BURST:
+		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+		case ISCSI_FLASHNODE_IPADDR:
+		case ISCSI_FLASHNODE_ALIAS:
+		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+		case ISCSI_FLASHNODE_LOCAL_PORT:
+		case ISCSI_FLASHNODE_IPV4_TOS:
+		case ISCSI_FLASHNODE_IPV6_TC:
+		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+		case ISCSI_FLASHNODE_NAME:
+		case ISCSI_FLASHNODE_TPGT:
+		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+		case ISCSI_FLASHNODE_TCP_RECV_WSF:
+		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+		case ISCSI_FLASHNODE_USERNAME:
+		case ISCSI_FLASHNODE_PASSWORD:
+		case ISCSI_FLASHNODE_STATSN:
+		case ISCSI_FLASHNODE_EXP_STATSN:
+		case ISCSI_FLASHNODE_IS_BOOT_TGT:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * qla4xxx_create_chap_list - Create CHAP list from FLASH
+ * @ha: pointer to adapter structure
+ *
+ * Read flash and build a list of CHAP entries. During login, when a CHAP
+ * entry is received it is checked against this list. If the entry exists,
+ * its index is set in the DDB. If it does not exist, a new entry is added
+ * to the CHAP table in FLASH and the resulting index is used in the DDB.
+ **/
+static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
+{
+	int rval = 0;
+	uint8_t *chap_flash_data = NULL;
+	uint32_t offset;
+	dma_addr_t chap_dma;
+	uint32_t chap_size = 0;
+
+	if (is_qla40XX(ha))
+		chap_size = MAX_CHAP_ENTRIES_40XX *
+			    sizeof(struct ql4_chap_table);
+	else	/* A single region holds the CHAP info for both
+		 * ports; each port gets half of it.
+		 */
+		chap_size = ha->hw.flt_chap_size / 2;
+
+	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
+					     &chap_dma, GFP_KERNEL);
+	if (!chap_flash_data) {
+		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
+		return;
+	}
+
+	if (is_qla40XX(ha)) {
+		offset = FLASH_CHAP_OFFSET;
+	} else {
+		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		if (ha->port_num == 1)
+			offset += chap_size;
+	}
+
+	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+	if (rval != QLA_SUCCESS)
+		goto exit_chap_list;
+
+	if (ha->chap_list == NULL)
+		ha->chap_list = vmalloc(chap_size);
+	if (ha->chap_list == NULL) {
+		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
+		goto exit_chap_list;
+	}
+
+	memset(ha->chap_list, 0, chap_size);
+	memcpy(ha->chap_list, chap_flash_data, chap_size);
+
+exit_chap_list:
+	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
+}
+
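+/*
+ * qla4xxx_get_chap_by_index - look up a cached CHAP entry
+ *
+ * Returns in @chap_entry the slot at @chap_index from the in-memory copy
+ * of the FLASH CHAP table, or QLA_ERROR if the index is out of range or
+ * the slot does not carry a valid cookie.
+ */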
+static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
+				     int16_t chap_index,
+				     struct ql4_chap_table **chap_entry)
+{
+	int rval = QLA_ERROR;
+	int max_chap_entries;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+		rval = QLA_ERROR;
+		goto exit_get_chap;
+	}
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (chap_index < 0 || chap_index >= max_chap_entries) {
+		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+		rval = QLA_ERROR;
+		goto exit_get_chap;
+	}
+
+	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
+	if ((*chap_entry)->cookie !=
+	     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+		rval = QLA_ERROR;
+		*chap_entry = NULL;
+	} else {
+		rval = QLA_SUCCESS;
+	}
+
+exit_get_chap:
+	return rval;
+}
+
+/**
+ * qla4xxx_find_free_chap_index - Find the first free chap index
+ * @ha: pointer to adapter structure
+ * @chap_index: CHAP index to be returned
+ *
+ * Find the first free chap index available in the chap table
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
+					uint16_t *chap_index)
+{
+	int i, rval;
+	int free_index = -1;
+	int max_chap_entries = 0;
+	struct ql4_chap_table *chap_table;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+						sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+		rval = QLA_ERROR;
+		goto exit_find_chap;
+	}
+
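+	/* Slots at or below MAX_RESRV_CHAP_IDX are reserved and are never
+	 * handed out as free indexes here. */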
+	for (i = 0; i < max_chap_entries; i++) {
+		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+
+		if ((chap_table->cookie !=
+		     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+		    (i > MAX_RESRV_CHAP_IDX)) {
+			free_index = i;
+			break;
+		}
+	}
+
+	if (free_index != -1) {
+		*chap_index = free_index;
+		rval = QLA_SUCCESS;
+	} else {
+		rval = QLA_ERROR;
+	}
+
+exit_find_chap:
+	return rval;
+}
+
+static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+				  uint32_t *num_entries, char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct ql4_chap_table *chap_table;
+	struct iscsi_chap_rec *chap_rec;
+	int max_chap_entries = 0;
+	int valid_chap_entries = 0;
+	int ret = 0, i;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+					sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
+			__func__, *num_entries, chap_tbl_idx);
+
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit_get_chap_list;
+	}
+
+	qla4xxx_create_chap_list(ha);
+
+	chap_rec = (struct iscsi_chap_rec *) buf;
+	mutex_lock(&ha->chap_sem);
+	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
+		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+		if (chap_table->cookie !=
+		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
+			continue;
+
+		chap_rec->chap_tbl_idx = i;
+		strlcpy(chap_rec->username, chap_table->name,
+			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
+		strlcpy(chap_rec->password, chap_table->secret,
+			QL4_CHAP_MAX_SECRET_LEN);
+		chap_rec->password_length = chap_table->secret_len;
+
+		if (chap_table->flags & BIT_7) /* local */
+			chap_rec->chap_type = CHAP_TYPE_OUT;
+
+		if (chap_table->flags & BIT_6) /* peer */
+			chap_rec->chap_type = CHAP_TYPE_IN;
+
+		chap_rec++;
+
+		valid_chap_entries++;
+		if (valid_chap_entries == *num_entries)
+			break;
+	}
+	mutex_unlock(&ha->chap_sem);
+
+exit_get_chap_list:
+	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
+			__func__,  valid_chap_entries);
+	*num_entries = valid_chap_entries;
+	return ret;
+}
+
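+/*
+ * device_for_each_child() callback: returns 1 (stopping the iteration)
+ * if the iSCSI session hanging off @dev references the CHAP table index
+ * passed in via @data.
+ */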
+static int __qla4xxx_is_chap_active(struct device *dev, void *data)
+{
+	int ret = 0;
+	uint16_t *chap_tbl_idx = (uint16_t *) data;
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+
+	if (!iscsi_is_session_dev(dev))
+		goto exit_is_chap_active;
+
+	cls_session = iscsi_dev_to_session(dev);
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+
+	if (iscsi_session_chkready(cls_session))
+		goto exit_is_chap_active;
+
+	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
+		ret = 1;
+
+exit_is_chap_active:
+	return ret;
+}
+
+static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
+				  uint16_t chap_tbl_idx)
+{
+	int ret = 0;
+
+	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
+				    __qla4xxx_is_chap_active);
+
+	return ret;
+}
+
+static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct ql4_chap_table *chap_table;
+	dma_addr_t chap_dma;
+	int max_chap_entries = 0;
+	uint32_t offset = 0;
+	uint32_t chap_size;
+	int ret = 0;
+
+	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+	if (chap_table == NULL)
+		return -ENOMEM;
+
+	memset(chap_table, 0, sizeof(struct ql4_chap_table));
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (chap_tbl_idx >= max_chap_entries) {
+		ret = -EINVAL;
+		goto exit_delete_chap;
+	}
+
+	/* If the CHAP index is in use, do not delete the entry from flash. */
+	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
+	if (ret) {
+		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
+			   "delete from flash\n", chap_tbl_idx);
+		ret = -EBUSY;
+		goto exit_delete_chap;
+	}
+
+	chap_size = sizeof(struct ql4_chap_table);
+	if (is_qla40XX(ha))
+		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
+	else {
+		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+		/* flt_chap_size is CHAP table size for both ports
+		 * so divide it by 2 to calculate the offset for second port
+		 */
+		if (ha->port_num == 1)
+			offset += (ha->hw.flt_chap_size / 2);
+		offset += (chap_tbl_idx * chap_size);
+	}
+
+	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
+	if (ret != QLA_SUCCESS) {
+		ret = -EINVAL;
+		goto exit_delete_chap;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
+			  __le16_to_cpu(chap_table->cookie)));
+
+	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
+		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
+		goto exit_delete_chap;
+	}
+
+	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
+
+	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
+				FLASH_OPT_RMW_COMMIT);
+	if (ret == QLA_SUCCESS && ha->chap_list) {
+		mutex_lock(&ha->chap_sem);
+		/* Update ha chap_list cache */
+		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
+			chap_table, sizeof(struct ql4_chap_table));
+		mutex_unlock(&ha->chap_sem);
+	}
+	if (ret != QLA_SUCCESS)
+		ret =  -EINVAL;
+
+exit_delete_chap:
+	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
+	return ret;
+}
+
+/**
+ * qla4xxx_set_chap_entry - Make chap entry with given information
+ * @shost: pointer to host
+ * @data: chap info - credentials, index and type to make chap entry
+ * @len: length of data
+ *
+ * Add or update chap entry with the given information
+ **/
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_chap_rec chap_rec;
+	struct ql4_chap_table *chap_entry = NULL;
+	struct iscsi_param_info *param_info;
+	struct nlattr *attr;
+	int max_chap_entries = 0;
+	int type;
+	int rem = len;
+	int rc = 0;
+	int size;
+
+	memset(&chap_rec, 0, sizeof(chap_rec));
+
+	nla_for_each_attr(attr, data, len, rem) {
+		param_info = nla_data(attr);
+
+		switch (param_info->param) {
+		case ISCSI_CHAP_PARAM_INDEX:
+			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
+			break;
+		case ISCSI_CHAP_PARAM_CHAP_TYPE:
+			chap_rec.chap_type = param_info->value[0];
+			break;
+		case ISCSI_CHAP_PARAM_USERNAME:
+			size = min_t(size_t, sizeof(chap_rec.username),
+				     param_info->len);
+			memcpy(chap_rec.username, param_info->value, size);
+			break;
+		case ISCSI_CHAP_PARAM_PASSWORD:
+			size = min_t(size_t, sizeof(chap_rec.password),
+				     param_info->len);
+			memcpy(chap_rec.password, param_info->value, size);
+			break;
+		case ISCSI_CHAP_PARAM_PASSWORD_LEN:
+			chap_rec.password_length = param_info->value[0];
+			break;
+		default:
+			ql4_printk(KERN_ERR, ha,
+				   "%s: No such sysfs attribute\n", __func__);
+			rc = -ENOSYS;
+			goto exit_set_chap;
+		}
+	}
+
+	if (chap_rec.chap_type == CHAP_TYPE_IN)
+		type = BIDI_CHAP;
+	else
+		type = LOCAL_CHAP;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	mutex_lock(&ha->chap_sem);
+	if (chap_rec.chap_tbl_idx < max_chap_entries) {
+		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
+					       &chap_entry);
+		if (!rc) {
+			if (type != qla4xxx_get_chap_type(chap_entry)) {
+				ql4_printk(KERN_INFO, ha,
+					   "Type mismatch for CHAP entry %d\n",
+					   chap_rec.chap_tbl_idx);
+				rc = -EINVAL;
+				goto exit_unlock_chap;
+			}
+
+			/* If chap index is in use then don't modify it */
+			rc = qla4xxx_is_chap_active(shost,
+						    chap_rec.chap_tbl_idx);
+			if (rc) {
+				ql4_printk(KERN_INFO, ha,
+					   "CHAP entry %d is in use\n",
+					   chap_rec.chap_tbl_idx);
+				rc = -EBUSY;
+				goto exit_unlock_chap;
+			}
+		}
+	} else {
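+		/*
+		 * An index at or beyond the table size is a request to
+		 * allocate a new free entry.
+		 */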
+		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
+		if (rc) {
+			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
+			rc = -EBUSY;
+			goto exit_unlock_chap;
+		}
+	}
+
+	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
+			      chap_rec.chap_tbl_idx, type);
+
+exit_unlock_chap:
+	mutex_unlock(&ha->chap_sem);
+
+exit_set_chap:
+	return rc;
+}
+
+
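+/*
+ * qla4xxx_get_host_stats - report adapter-wide offload statistics
+ *
+ * Fetches the firmware statistics block via qla4xxx_get_mgmt_data() into
+ * a DMA buffer and translates the little-endian counters into the
+ * iscsi_offload_host_stats layout expected by the iSCSI transport class.
+ */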
+static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_offload_host_stats *host_stats = NULL;
+	int host_stats_size;
+	int ret = 0;
+	int ddb_idx = 0;
+	struct ql_iscsi_stats *ql_iscsi_stats = NULL;
+	int stats_size;
+	dma_addr_t iscsi_stats_dma;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
+
+	host_stats_size = sizeof(struct iscsi_offload_host_stats);
+
+	if (host_stats_size != len) {
+		ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
+			   __func__, len, host_stats_size);
+		ret = -EINVAL;
+		goto exit_host_stats;
+	}
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit_host_stats;
+	}
+
+	host_stats = (struct iscsi_offload_host_stats *)buf;
+
+	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+
+	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+					    &iscsi_stats_dma, GFP_KERNEL);
+	if (!ql_iscsi_stats) {
+		ql4_printk(KERN_ERR, ha,
+			   "Unable to allocate memory for iscsi stats\n");
+		ret = -ENOMEM;
+		goto exit_host_stats;
+	}
+
+	ret =  qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
+				     iscsi_stats_dma);
+	if (ret != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha,
+			   "Unable to retrieve iscsi stats\n");
+		ret = -EIO;
+		goto exit_host_stats;
+	}
+	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
+	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
+	host_stats->mactx_multicast_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
+	host_stats->mactx_broadcast_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
+	host_stats->mactx_pause_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
+	host_stats->mactx_control_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
+	host_stats->mactx_deferral =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
+	host_stats->mactx_excess_deferral =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
+	host_stats->mactx_late_collision =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
+	host_stats->mactx_abort	= le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
+	host_stats->mactx_single_collision =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
+	host_stats->mactx_multiple_collision =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
+	host_stats->mactx_collision =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
+	host_stats->mactx_frames_dropped =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
+	host_stats->mactx_jumbo_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
+	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
+	host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
+	host_stats->macrx_unknown_control_frames =
+		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
+	host_stats->macrx_pause_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
+	host_stats->macrx_control_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
+	host_stats->macrx_dribble =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
+	host_stats->macrx_frame_length_error =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
+	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
+	host_stats->macrx_carrier_sense_error =
+		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
+	host_stats->macrx_frame_discarded =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
+	host_stats->macrx_frames_dropped =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
+	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
+	host_stats->mac_encoding_error =
+			le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
+	host_stats->macrx_length_error_large =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
+	host_stats->macrx_length_error_small =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
+	host_stats->macrx_multicast_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
+	host_stats->macrx_broadcast_frames =
+			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
+	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
+	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
+	host_stats->iptx_fragments =
+			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
+	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
+	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
+	host_stats->iprx_fragments =
+			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
+	host_stats->ip_datagram_reassembly =
+			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
+	host_stats->ip_invalid_address_error =
+			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
+	host_stats->ip_error_packets =
+			le64_to_cpu(ql_iscsi_stats->ip_error_packets);
+	host_stats->ip_fragrx_overlap =
+			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
+	host_stats->ip_fragrx_outoforder =
+			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
+	host_stats->ip_datagram_reassembly_timeout =
+		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
+	host_stats->ipv6tx_packets =
+			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
+	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
+	host_stats->ipv6tx_fragments =
+			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
+	host_stats->ipv6rx_packets =
+			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
+	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
+	host_stats->ipv6rx_fragments =
+			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
+	host_stats->ipv6_datagram_reassembly =
+			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
+	host_stats->ipv6_invalid_address_error =
+		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
+	host_stats->ipv6_error_packets =
+			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
+	host_stats->ipv6_fragrx_overlap =
+			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
+	host_stats->ipv6_fragrx_outoforder =
+			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
+	host_stats->ipv6_datagram_reassembly_timeout =
+		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
+	host_stats->tcptx_segments =
+			le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
+	host_stats->tcptx_bytes	= le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
+	host_stats->tcprx_segments =
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
+	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
+	host_stats->tcp_duplicate_ack_retx =
+			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
+	host_stats->tcp_retx_timer_expired =
+			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
+	host_stats->tcprx_duplicate_ack	=
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
+	host_stats->tcprx_pure_ackr =
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
+	host_stats->tcptx_delayed_ack =
+			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
+	host_stats->tcptx_pure_ack =
+			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
+	host_stats->tcprx_segment_error =
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
+	host_stats->tcprx_segment_outoforder =
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
+	host_stats->tcprx_window_probe =
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
+	host_stats->tcprx_window_update =
+			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
+	host_stats->tcptx_window_probe_persist =
+		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
+	host_stats->ecc_error_correction =
+			le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
+	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
+	host_stats->iscsi_data_bytes_tx =
+			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
+	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
+	host_stats->iscsi_data_bytes_rx	=
+			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
+	host_stats->iscsi_io_completed =
+			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
+	host_stats->iscsi_unexpected_io_rx =
+			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
+	host_stats->iscsi_format_error =
+			le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
+	host_stats->iscsi_hdr_digest_error =
+			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
+	host_stats->iscsi_data_digest_error =
+			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
+	host_stats->iscsi_sequence_error =
+			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
+exit_host_stats:
+	if (ql_iscsi_stats)
+		dma_free_coherent(&ha->pdev->dev, host_stats_size,
+				  ql_iscsi_stats, iscsi_stats_dma);
+
+	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
+		   __func__);
+	return ret;
+}
+
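+/*
+ * qla4xxx_get_iface_param - read side of the iface attribute interface
+ *
+ * Formats the cached ip_config value for the requested network or iSCSI
+ * iface parameter into @buf; returns the byte count, or -ENOSYS for
+ * parameters this adapter does not export.
+ */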
+static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
+				   enum iscsi_param_type param_type,
+				   int param, char *buf)
+{
+	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	int ival;
+	char *pval = NULL;
+	int len = -ENOSYS;
+
+	if (param_type == ISCSI_NET_PARAM) {
+		switch (param) {
+		case ISCSI_NET_PARAM_IPV4_ADDR:
+			len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
+			break;
+		case ISCSI_NET_PARAM_IPV4_SUBNET:
+			len = sprintf(buf, "%pI4\n",
+				      &ha->ip_config.subnet_mask);
+			break;
+		case ISCSI_NET_PARAM_IPV4_GW:
+			len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
+			break;
+		case ISCSI_NET_PARAM_IFACE_ENABLE:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(ha->ip_config.ipv4_options,
+					 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
+			} else {
+				OP_STATE(ha->ip_config.ipv6_options,
+					 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
+			}
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+			len = sprintf(buf, "%s\n",
+				      (ha->ip_config.tcp_options &
+				       TCPOPT_DHCP_ENABLE) ?
+				      "dhcp" : "static");
+			break;
+		case ISCSI_NET_PARAM_IPV6_ADDR:
+			if (iface->iface_num == 0)
+				len = sprintf(buf, "%pI6\n",
+					      &ha->ip_config.ipv6_addr0);
+			else if (iface->iface_num == 1)
+				len = sprintf(buf, "%pI6\n",
+					      &ha->ip_config.ipv6_addr1);
+			break;
+		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+			len = sprintf(buf, "%pI6\n",
+				      &ha->ip_config.ipv6_link_local_addr);
+			break;
+		case ISCSI_NET_PARAM_IPV6_ROUTER:
+			len = sprintf(buf, "%pI6\n",
+				      &ha->ip_config.ipv6_default_router_addr);
+			break;
+		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+			pval = (ha->ip_config.ipv6_addl_options &
+				IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
+				"nd" : "static";
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+			pval = (ha->ip_config.ipv6_addl_options &
+				IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
+				"auto" : "static";
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_VLAN_ID:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+				ival = ha->ip_config.ipv4_vlan_tag &
+				       ISCSI_MAX_VLAN_ID;
+			else
+				ival = ha->ip_config.ipv6_vlan_tag &
+				       ISCSI_MAX_VLAN_ID;
+
+			len = sprintf(buf, "%d\n", ival);
+			break;
+		case ISCSI_NET_PARAM_VLAN_PRIORITY:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+				ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
+				       ISCSI_MAX_VLAN_PRIORITY;
+			else
+				ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
+				       ISCSI_MAX_VLAN_PRIORITY;
+
+			len = sprintf(buf, "%d\n", ival);
+			break;
+		case ISCSI_NET_PARAM_VLAN_ENABLED:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(ha->ip_config.ipv4_options,
+					 IPOPT_VLAN_TAGGING_ENABLE, pval);
+			} else {
+				OP_STATE(ha->ip_config.ipv6_options,
+					 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
+			}
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_MTU:
+			len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
+			break;
+		case ISCSI_NET_PARAM_PORT:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+				len = sprintf(buf, "%d\n",
+					      ha->ip_config.ipv4_port);
+			else
+				len = sprintf(buf, "%d\n",
+					      ha->ip_config.ipv6_port);
+			break;
+		case ISCSI_NET_PARAM_IPADDR_STATE:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				pval = iscsi_get_ipaddress_state_name(
+						ha->ip_config.ipv4_addr_state);
+			} else {
+				if (iface->iface_num == 0)
+					pval = iscsi_get_ipaddress_state_name(
+						ha->ip_config.ipv6_addr0_state);
+				else if (iface->iface_num == 1)
+					pval = iscsi_get_ipaddress_state_name(
+						ha->ip_config.ipv6_addr1_state);
+			}
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
+			pval = iscsi_get_ipaddress_state_name(
+					ha->ip_config.ipv6_link_local_state);
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
+			pval = iscsi_get_router_state_name(
+				      ha->ip_config.ipv6_default_router_state);
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_DELAYED_ACK_EN:
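+			/*
+			 * The firmware stores a "disable" bit for this and
+			 * the following TCP options; negate the options word
+			 * so OP_STATE() reports the enabled state.
+			 */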
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(~ha->ip_config.tcp_options,
+					 TCPOPT_DELAYED_ACK_DISABLE, pval);
+			} else {
+				OP_STATE(~ha->ip_config.ipv6_tcp_options,
+					 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
+			}
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(~ha->ip_config.tcp_options,
+					 TCPOPT_NAGLE_ALGO_DISABLE, pval);
+			} else {
+				OP_STATE(~ha->ip_config.ipv6_tcp_options,
+					 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
+			}
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(~ha->ip_config.tcp_options,
+					 TCPOPT_WINDOW_SCALE_DISABLE, pval);
+			} else {
+				OP_STATE(~ha->ip_config.ipv6_tcp_options,
+					 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
+					 pval);
+			}
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_TCP_WSF:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+				len = sprintf(buf, "%d\n",
+					      ha->ip_config.tcp_wsf);
+			else
+				len = sprintf(buf, "%d\n",
+					      ha->ip_config.ipv6_tcp_wsf);
+			break;
+		case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+				ival = (ha->ip_config.tcp_options &
+					TCPOPT_TIMER_SCALE) >> 1;
+			else
+				ival = (ha->ip_config.ipv6_tcp_options &
+					IPV6_TCPOPT_TIMER_SCALE) >> 1;
+
+			len = sprintf(buf, "%d\n", ival);
+			break;
+		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(ha->ip_config.tcp_options,
+					 TCPOPT_TIMESTAMP_ENABLE, pval);
+			} else {
+				OP_STATE(ha->ip_config.ipv6_tcp_options,
+					 IPV6_TCPOPT_TIMESTAMP_EN, pval);
+			}
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_CACHE_ID:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+				len = sprintf(buf, "%d\n",
+					      ha->ip_config.ipv4_cache_id);
+			else
+				len = sprintf(buf, "%d\n",
+					      ha->ip_config.ipv6_cache_id);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+			OP_STATE(ha->ip_config.tcp_options,
+				 TCPOPT_DNS_SERVER_IP_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+			OP_STATE(ha->ip_config.tcp_options,
+				 TCPOPT_SLP_DA_INFO_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_TOS_EN:
+			OP_STATE(ha->ip_config.ipv4_options,
+				 IPOPT_IPV4_TOS_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_TOS:
+			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
+			break;
+		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+			OP_STATE(ha->ip_config.ipv4_options,
+				 IPOPT_GRAT_ARP_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+			OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
+				 pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+			pval = (ha->ip_config.ipv4_alt_cid_len) ?
+			       (char *)ha->ip_config.ipv4_alt_cid : "";
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+			OP_STATE(ha->ip_config.ipv4_options,
+				 IPOPT_REQ_VID_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+			OP_STATE(ha->ip_config.ipv4_options,
+				 IPOPT_USE_VID_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+			pval = (ha->ip_config.ipv4_vid_len) ?
+			       (char *)ha->ip_config.ipv4_vid : "";
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+			OP_STATE(ha->ip_config.ipv4_options,
+				 IPOPT_LEARN_IQN_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+			OP_STATE(~ha->ip_config.ipv4_options,
+				 IPOPT_FRAGMENTATION_DISABLE, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+			OP_STATE(ha->ip_config.ipv4_options,
+				 IPOPT_IN_FORWARD_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_REDIRECT_EN:
+			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
+				OP_STATE(ha->ip_config.ipv4_options,
+					 IPOPT_ARP_REDIRECT_EN, pval);
+			} else {
+				OP_STATE(ha->ip_config.ipv6_options,
+					 IPV6_OPT_REDIRECT_EN, pval);
+			}
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV4_TTL:
+			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
+			break;
+		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+			OP_STATE(ha->ip_config.ipv6_options,
+				 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV6_MLD_EN:
+			OP_STATE(ha->ip_config.ipv6_addl_options,
+				 IPV6_ADDOPT_MLD_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+			len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
+			break;
+		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_traffic_class);
+			break;
+		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_hop_limit);
+			break;
+		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_nd_reach_time);
+			break;
+		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_nd_rexmit_timer);
+			break;
+		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_nd_stale_timeout);
+			break;
+		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_dup_addr_detect_count);
+			break;
+		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.ipv6_gw_advrt_mtu);
+			break;
+		default:
+			len = -ENOSYS;
+		}
+	} else if (param_type == ISCSI_IFACE_PARAM) {
+		switch (param) {
+		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+			len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
+			break;
+		case ISCSI_IFACE_PARAM_HDRDGST_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_HEADER_DIGEST_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_DATADGST_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_DATA_DIGEST_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_INITIAL_R2T_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_ERL:
+			len = sprintf(buf, "%d\n",
+				      (ha->ip_config.iscsi_options &
+				       ISCSIOPTS_ERL));
+			break;
+		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+			len = sprintf(buf, "%u\n",
+				      ha->ip_config.iscsi_max_pdu_size *
+				      BYTE_UNITS);
+			break;
+		case ISCSI_IFACE_PARAM_FIRST_BURST:
+			len = sprintf(buf, "%u\n",
+				      ha->ip_config.iscsi_first_burst_len *
+				      BYTE_UNITS);
+			break;
+		case ISCSI_IFACE_PARAM_MAX_R2T:
+			len = sprintf(buf, "%d\n",
+				      ha->ip_config.iscsi_max_outstnd_r2t);
+			break;
+		case ISCSI_IFACE_PARAM_MAX_BURST:
+			len = sprintf(buf, "%u\n",
+				      ha->ip_config.iscsi_max_burst_len *
+				      BYTE_UNITS);
+			break;
+		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_CHAP_AUTH_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_BIDI_CHAP_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+			OP_STATE(ha->ip_config.iscsi_options,
+				 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
+
+			len = sprintf(buf, "%s\n", pval);
+			break;
+		case ISCSI_IFACE_PARAM_INITIATOR_NAME:
+			len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
+			break;
+		default:
+			len = -ENOSYS;
+		}
+	}
+
+	return len;
+}
+
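+/*
+ * qla4xxx_ep_connect - allocate a transport endpoint for @dst_addr
+ *
+ * No connection is initiated here; the destination address and owning
+ * host are simply recorded on the endpoint for later use when the
+ * connection is established.
+ */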
+static struct iscsi_endpoint *
+qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+		   int non_blocking)
+{
+	int ret;
+	struct iscsi_endpoint *ep;
+	struct qla_endpoint *qla_ep;
+	struct scsi_qla_host *ha;
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+
+	if (!shost) {
+		ret = -ENXIO;
+		pr_err("%s: shost is NULL\n", __func__);
+		return ERR_PTR(ret);
+	}
+
+	ha = iscsi_host_priv(shost);
+	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
+	if (!ep) {
+		ret = -ENOMEM;
+		return ERR_PTR(ret);
+	}
+
+	qla_ep = ep->dd_data;
+	memset(qla_ep, 0, sizeof(struct qla_endpoint));
+	if (dst_addr->sa_family == AF_INET) {
+		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
+		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
+				  (char *)&addr->sin_addr));
+	} else if (dst_addr->sa_family == AF_INET6) {
+		memcpy(&qla_ep->dst_addr, dst_addr,
+		       sizeof(struct sockaddr_in6));
+		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
+				  (char *)&addr6->sin6_addr));
+	} else {
+		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
+			   __func__);
+	}
+
+	qla_ep->host = shost;
+
+	return ep;
+}
+
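+/* Report the endpoint as usable once the adapter is up and the driver has
+ * finished building its DDB list. */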
+static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct qla_endpoint *qla_ep;
+	struct scsi_qla_host *ha;
+	int ret = 0;
+
+	qla_ep = ep->dd_data;
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
+
+	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
+		ret = 1;
+
+	return ret;
+}
+
+static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct qla_endpoint *qla_ep;
+	struct scsi_qla_host *ha;
+
+	qla_ep = ep->dd_data;
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
+	iscsi_destroy_endpoint(ep);
+}
+
+static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
+				enum iscsi_param param,
+				char *buf)
+{
+	struct qla_endpoint *qla_ep = ep->dd_data;
+	struct sockaddr *dst_addr;
+	struct scsi_qla_host *ha;
+
+	if (!qla_ep)
+		return -ENOTCONN;
+
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+	case ISCSI_PARAM_CONN_ADDRESS:
+		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+		if (!dst_addr)
+			return -ENOTCONN;
+
+		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+						 &qla_ep->dst_addr, param, buf);
+	default:
+		return -ENOSYS;
+	}
+}
+
+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				   struct iscsi_stats *stats)
+{
+	struct iscsi_session *sess;
+	struct iscsi_cls_session *cls_sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct ql_iscsi_stats *ql_iscsi_stats;
+	int stats_size;
+	int ret;
+	dma_addr_t iscsi_stats_dma;
+
+	cls_sess = iscsi_conn_to_session(cls_conn);
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
+	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+	/* Allocate memory */
+	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+					    &iscsi_stats_dma, GFP_KERNEL);
+	if (!ql_iscsi_stats) {
+		ql4_printk(KERN_ERR, ha,
+			   "Unable to allocate memory for iscsi stats\n");
+		goto exit_get_stats;
+	}
+
+	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
+				     iscsi_stats_dma);
+	if (ret != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha,
+			   "Unable to retrieve iscsi stats\n");
+		goto free_stats;
+	}
+
+	/* octets */
+	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
+	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
+	/* xmit pdus */
+	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
+	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
+	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
+	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
+	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
+	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
+	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
+	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
+	/* recv pdus */
+	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
+	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
+	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
+	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
+	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
+	stats->logoutrsp_pdus =
+			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
+	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
+	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
+	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
+
+free_stats:
+	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
+			  iscsi_stats_dma);
+exit_get_stats:
+	return;
+}
+
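+/*
+ * While the session is in ISCSI_SESSION_FAILED the transport is trying to
+ * recover it, so keep resetting the block-layer command timer instead of
+ * escalating to SCSI error handling.
+ */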
+static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
+{
+	struct iscsi_cls_session *session;
+	struct iscsi_session *sess;
+	unsigned long flags;
+	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
+
+	session = starget_to_session(scsi_target(sc->device));
+	sess = session->dd_data;
+
+	spin_lock_irqsave(&session->lock, flags);
+	if (session->state == ISCSI_SESSION_FAILED)
+		ret = BLK_EH_RESET_TIMER;
+	spin_unlock_irqrestore(&session->lock, flags);
+
+	return ret;
+}
+
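+/* Map the link-speed field (bits 8-11) of the firmware's additional state
+ * word to the transport class's port-speed enumeration. */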
+static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
+
+	qla4xxx_get_firmware_state(ha);
+
+	switch (ha->addl_fw_state & 0x0F00) {
+	case FW_ADDSTATE_LINK_SPEED_10MBPS:
+		speed = ISCSI_PORT_SPEED_10MBPS;
+		break;
+	case FW_ADDSTATE_LINK_SPEED_100MBPS:
+		speed = ISCSI_PORT_SPEED_100MBPS;
+		break;
+	case FW_ADDSTATE_LINK_SPEED_1GBPS:
+		speed = ISCSI_PORT_SPEED_1GBPS;
+		break;
+	case FW_ADDSTATE_LINK_SPEED_10GBPS:
+		speed = ISCSI_PORT_SPEED_10GBPS;
+		break;
+	}
+	ihost->port_speed = speed;
+}
+
+static void qla4xxx_set_port_state(struct Scsi_Host *shost)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	uint32_t state = ISCSI_PORT_STATE_DOWN;
+
+	if (test_bit(AF_LINK_UP, &ha->flags))
+		state = ISCSI_PORT_STATE_UP;
+
+	ihost->port_state = state;
+}
+
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+				  enum iscsi_host_param param, char *buf)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	int len;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
+		break;
+	case ISCSI_HOST_PARAM_IPADDRESS:
+		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
+		break;
+	case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		len = sprintf(buf, "%s\n", ha->name_string);
+		break;
+	case ISCSI_HOST_PARAM_PORT_STATE:
+		qla4xxx_set_port_state(shost);
+		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+		break;
+	case ISCSI_HOST_PARAM_PORT_SPEED:
+		qla4xxx_set_port_speed(shost);
+		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return len;
+}
+
+static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
+{
+	if (ha->iface_ipv4)
+		return;
+
+	/* IPv4 */
+	ha->iface_ipv4 = iscsi_create_iface(ha->host,
+					    &qla4xxx_iscsi_transport,
+					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
+	if (!ha->iface_ipv4)
+		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
+			   "iface0.\n");
+}
+
+static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
+{
+	if (!ha->iface_ipv6_0)
+		/* IPv6 iface-0 */
+		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
+						      &qla4xxx_iscsi_transport,
+						      ISCSI_IFACE_TYPE_IPV6, 0,
+						      0);
+	if (!ha->iface_ipv6_0)
+		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
+			   "iface0.\n");
+
+	if (!ha->iface_ipv6_1)
+		/* IPv6 iface-1 */
+		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
+						      &qla4xxx_iscsi_transport,
+						      ISCSI_IFACE_TYPE_IPV6, 1,
+						      0);
+	if (!ha->iface_ipv6_1)
+		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
+			   "iface1.\n");
+}
+
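+/* Create transport-class iface objects only for the protocols the
+ * firmware reports as enabled. */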
+static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
+{
+	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
+		qla4xxx_create_ipv4_iface(ha);
+
+	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
+		qla4xxx_create_ipv6_iface(ha);
+}
+
+static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
+{
+	if (ha->iface_ipv4) {
+		iscsi_destroy_iface(ha->iface_ipv4);
+		ha->iface_ipv4 = NULL;
+	}
+}
+
+static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
+{
+	if (ha->iface_ipv6_0) {
+		iscsi_destroy_iface(ha->iface_ipv6_0);
+		ha->iface_ipv6_0 = NULL;
+	}
+	if (ha->iface_ipv6_1) {
+		iscsi_destroy_iface(ha->iface_ipv6_1);
+		ha->iface_ipv6_1 = NULL;
+	}
+}
+
+static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
+{
+	qla4xxx_destroy_ipv4_iface(ha);
+	qla4xxx_destroy_ipv6_iface(ha);
+}
+
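+/* Apply a single IPv6 iface parameter from the netlink request to the
+ * init-firmware control block; the caller is responsible for committing
+ * the updated block to the adapter. */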
+static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
+			     struct iscsi_iface_param_info *iface_param,
+			     struct addr_ctrl_blk *init_fw_cb)
+{
+	/*
+	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
+	 * iface_num 1 is valid only for IPv6 Addr.
+	 */
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		if (iface_param->iface_num & 0x1)
+			/* IPv6 Addr 1 */
+			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
+			       sizeof(init_fw_cb->ipv6_addr1));
+		else
+			/* IPv6 Addr 0 */
+			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
+			       sizeof(init_fw_cb->ipv6_addr0));
+		break;
+	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
+		if (iface_param->iface_num & 0x1)
+			break;
+		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
+		       sizeof(init_fw_cb->ipv6_if_id));
+		break;
+	case ISCSI_NET_PARAM_IPV6_ROUTER:
+		if (iface_param->iface_num & 0x1)
+			break;
+		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
+		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
+		break;
+	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
+		/* Autocfg applies to even interface */
+		if (iface_param->iface_num & 0x1)
+			break;
+
+		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
+			init_fw_cb->ipv6_addtl_opts &=
+				cpu_to_le16(
+				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
+		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
+			init_fw_cb->ipv6_addtl_opts |=
+				cpu_to_le16(
+				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
+		else
+			ql4_printk(KERN_ERR, ha,
+				   "Invalid autocfg setting for IPv6 addr\n");
+		break;
+	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
+		/* Autocfg applies to even interface */
+		if (iface_param->iface_num & 0x1)
+			break;
+
+		if (iface_param->value[0] ==
+		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
+			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
+					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
+		else if (iface_param->value[0] ==
+			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
+			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
+				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
+		else
+			ql4_printk(KERN_ERR, ha,
+				   "Invalid autocfg setting for IPv6 linklocal addr\n");
+		break;
+	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
+		/* Autocfg applies to even interface */
+		if (iface_param->iface_num & 0x1)
+			break;
+
+		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
+			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
+			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
+		break;
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
+			init_fw_cb->ipv6_opts |=
+				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
+			qla4xxx_create_ipv6_iface(ha);
+		} else {
+			init_fw_cb->ipv6_opts &=
+				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
+					    0xFFFF);
+			qla4xxx_destroy_ipv6_iface(ha);
+		}
+		break;
+	case ISCSI_NET_PARAM_VLAN_TAG:
+		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
+			break;
+		init_fw_cb->ipv6_vlan_tag =
+				cpu_to_be16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_VLAN_ENABLED:
+		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
+			init_fw_cb->ipv6_opts |=
+				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
+		else
+			init_fw_cb->ipv6_opts &=
+				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
+		break;
+	case ISCSI_NET_PARAM_MTU:
+		init_fw_cb->eth_mtu_size =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_PORT:
+		/* Autocfg applies to even interface */
+		if (iface_param->iface_num & 0x1)
+			break;
+
+		init_fw_cb->ipv6_port =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv6_tcp_opts |=
+				cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
+		else
+			init_fw_cb->ipv6_tcp_opts &=
+				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
+					    0xFFFF);
+		break;
+	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv6_tcp_opts |=
+				cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
+		else
+			init_fw_cb->ipv6_tcp_opts &=
+				cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
+		break;
+	case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv6_tcp_opts |=
+				cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
+		else
+			init_fw_cb->ipv6_tcp_opts &=
+				cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
+		break;
+	case ISCSI_NET_PARAM_TCP_WSF:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
+		break;
+	case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_tcp_opts &=
+					cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
+		init_fw_cb->ipv6_tcp_opts |=
+				cpu_to_le16((iface_param->value[0] << 1) &
+					    IPV6_TCPOPT_TIMER_SCALE);
+		break;
+	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv6_tcp_opts |=
+				cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
+		else
+			init_fw_cb->ipv6_tcp_opts &=
+				cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv6_opts |=
+				cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
+		else
+			init_fw_cb->ipv6_opts &=
+				cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
+		break;
+	case ISCSI_NET_PARAM_REDIRECT_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv6_opts |=
+				cpu_to_le16(IPV6_OPT_REDIRECT_EN);
+		else
+			init_fw_cb->ipv6_opts &=
+				cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV6_MLD_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv6_addtl_opts |=
+				cpu_to_le16(IPV6_ADDOPT_MLD_EN);
+		else
+			init_fw_cb->ipv6_addtl_opts &=
+				cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_flow_lbl =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_traffic_class = iface_param->value[0];
+		break;
+	case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_hop_limit = iface_param->value[0];
+		break;
+	case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_nd_reach_time =
+				cpu_to_le32(*(uint32_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_nd_rexmit_timer =
+				cpu_to_le32(*(uint32_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_nd_stale_timeout =
+				cpu_to_le32(*(uint32_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
+		break;
+	case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv6_gw_advrt_mtu =
+				cpu_to_le32(*(uint32_t *)iface_param->value);
+		break;
+	default:
+		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
+			   iface_param->param);
+		break;
+	}
+}
+
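+/* Apply a single IPv4 iface parameter from the netlink request to the
+ * init-firmware control block; the caller is responsible for committing
+ * the updated block to the adapter. */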
+static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
+			     struct iscsi_iface_param_info *iface_param,
+			     struct addr_ctrl_blk *init_fw_cb)
+{
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
+		       sizeof(init_fw_cb->ipv4_addr));
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
+		       sizeof(init_fw_cb->ipv4_subnet));
+		break;
+	case ISCSI_NET_PARAM_IPV4_GW:
+		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
+		       sizeof(init_fw_cb->ipv4_gw_addr));
+		break;
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+			init_fw_cb->ipv4_tcp_opts |=
+					cpu_to_le16(TCPOPT_DHCP_ENABLE);
+		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+			init_fw_cb->ipv4_tcp_opts &=
+					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
+		else
+			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
+		break;
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
+			init_fw_cb->ipv4_ip_opts |=
+				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
+			qla4xxx_create_ipv4_iface(ha);
+		} else {
+			init_fw_cb->ipv4_ip_opts &=
+				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
+					    0xFFFF);
+			qla4xxx_destroy_ipv4_iface(ha);
+		}
+		break;
+	case ISCSI_NET_PARAM_VLAN_TAG:
+		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
+			break;
+		init_fw_cb->ipv4_vlan_tag =
+				cpu_to_be16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_VLAN_ENABLED:
+		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
+		break;
+	case ISCSI_NET_PARAM_MTU:
+		init_fw_cb->eth_mtu_size =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_PORT:
+		init_fw_cb->ipv4_port =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_NET_PARAM_DELAYED_ACK_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
+		else
+			init_fw_cb->ipv4_tcp_opts &=
+				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
+					    0xFFFF);
+		break;
+	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
+		else
+			init_fw_cb->ipv4_tcp_opts &=
+				cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
+		break;
+	case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
+		else
+			init_fw_cb->ipv4_tcp_opts &=
+				cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
+		break;
+	case ISCSI_NET_PARAM_TCP_WSF:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
+		break;
+	case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
+		init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16((iface_param->value[0] << 1) &
+					    TCPOPT_TIMER_SCALE);
+		break;
+	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
+		else
+			init_fw_cb->ipv4_tcp_opts &=
+				cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
+		else
+			init_fw_cb->ipv4_tcp_opts &=
+				cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_tcp_opts |=
+				cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
+		else
+			init_fw_cb->ipv4_tcp_opts &=
+				cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_TOS_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+				cpu_to_le16(IPOPT_IPV4_TOS_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+				cpu_to_le16(~IPOPT_IPV4_TOS_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_TOS:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv4_tos = iface_param->value[0];
+		break;
+	case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+					cpu_to_le16(IPOPT_GRAT_ARP_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+					cpu_to_le16(~IPOPT_GRAT_ARP_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+				cpu_to_le16(IPOPT_ALT_CID_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+				cpu_to_le16(~IPOPT_ALT_CID_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
+		if (iface_param->iface_num & 0x1)
+			break;
+		memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
+		       (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
+		init_fw_cb->ipv4_dhcp_alt_cid_len =
+					strlen(init_fw_cb->ipv4_dhcp_alt_cid);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+					cpu_to_le16(IPOPT_REQ_VID_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+					cpu_to_le16(~IPOPT_REQ_VID_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+					cpu_to_le16(IPOPT_USE_VID_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+					cpu_to_le16(~IPOPT_USE_VID_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
+		if (iface_param->iface_num & 0x1)
+			break;
+		memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
+		       (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
+		init_fw_cb->ipv4_dhcp_vid_len =
+					strlen(init_fw_cb->ipv4_dhcp_vid);
+		break;
+	case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+					cpu_to_le16(IPOPT_LEARN_IQN_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+					cpu_to_le16(~IPOPT_LEARN_IQN_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
+			init_fw_cb->ipv4_ip_opts |=
+				cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+				cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
+		break;
+	case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+				cpu_to_le16(IPOPT_IN_FORWARD_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+				cpu_to_le16(~IPOPT_IN_FORWARD_EN);
+		break;
+	case ISCSI_NET_PARAM_REDIRECT_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->ipv4_ip_opts |=
+				cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
+		else
+			init_fw_cb->ipv4_ip_opts &=
+				cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
+		break;
+	case ISCSI_NET_PARAM_IPV4_TTL:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->ipv4_ttl = iface_param->value[0];
+		break;
+	default:
+		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
+			   iface_param->param);
+		break;
+	}
+}
+
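+/* Apply one iSCSI (non-network) parameter to the cached init_fw_cb.
+ * These settings are port-wide, so they are only accepted from the
+ * first interface; odd iface numbers are ignored.
+ */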
+static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
+				    struct iscsi_iface_param_info *iface_param,
+				    struct addr_ctrl_blk *init_fw_cb)
+{
+	switch (iface_param->param) {
+	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->def_timeout =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_IFACE_PARAM_HDRDGST_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
+		break;
+	case ISCSI_IFACE_PARAM_DATADGST_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
+		break;
+	case ISCSI_IFACE_PARAM_IMM_DATA_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
+		break;
+	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
+		break;
+	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
+		break;
+	case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
+		break;
+	case ISCSI_IFACE_PARAM_ERL:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
+		init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
+						      ISCSIOPTS_ERL);
+		break;
+	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->iscsi_max_pdu_size =
+				cpu_to_le32(*(uint32_t *)iface_param->value) /
+				BYTE_UNITS;
+		break;
+	case ISCSI_IFACE_PARAM_FIRST_BURST:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->iscsi_fburst_len =
+				cpu_to_le32(*(uint32_t *)iface_param->value) /
+				BYTE_UNITS;
+		break;
+	case ISCSI_IFACE_PARAM_MAX_R2T:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->iscsi_max_outstnd_r2t =
+				cpu_to_le16(*(uint16_t *)iface_param->value);
+		break;
+	case ISCSI_IFACE_PARAM_MAX_BURST:
+		if (iface_param->iface_num & 0x1)
+			break;
+		init_fw_cb->iscsi_max_burst_len =
+				cpu_to_le32(*(uint32_t *)iface_param->value) /
+				BYTE_UNITS;
+		break;
+	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
+		break;
+	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
+		break;
+	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
+		break;
+	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
+		break;
+	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
+		if (iface_param->iface_num & 0x1)
+			break;
+		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
+			init_fw_cb->iscsi_opts |=
+				cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
+		else
+			init_fw_cb->iscsi_opts &=
+				cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
+		break;
+	default:
+		ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
+			   iface_param->param);
+		break;
+	}
+}
+
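+/* The init control block shares its layout with the address control
+ * block; before handing it back to the firmware as an ACB, clear every
+ * field that is reserved in the ACB view of the structure.
+ */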
+static void
+qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
+{
+	struct addr_ctrl_blk_def *acb;
+	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
+	memset(acb->reserved1, 0, sizeof(acb->reserved1));
+	memset(acb->reserved2, 0, sizeof(acb->reserved2));
+	memset(acb->reserved3, 0, sizeof(acb->reserved3));
+	memset(acb->reserved4, 0, sizeof(acb->reserved4));
+	memset(acb->reserved5, 0, sizeof(acb->reserved5));
+	memset(acb->reserved6, 0, sizeof(acb->reserved6));
+	memset(acb->reserved7, 0, sizeof(acb->reserved7));
+	memset(acb->reserved8, 0, sizeof(acb->reserved8));
+	memset(acb->reserved9, 0, sizeof(acb->reserved9));
+	memset(acb->reserved10, 0, sizeof(acb->reserved10));
+	memset(acb->reserved11, 0, sizeof(acb->reserved11));
+	memset(acb->reserved12, 0, sizeof(acb->reserved12));
+	memset(acb->reserved13, 0, sizeof(acb->reserved13));
+	memset(acb->reserved14, 0, sizeof(acb->reserved14));
+	memset(acb->reserved15, 0, sizeof(acb->reserved15));
+}
+
+static int
+qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	int rval = 0;
+	struct iscsi_iface_param_info *iface_param = NULL;
+	struct addr_ctrl_blk *init_fw_cb = NULL;
+	dma_addr_t init_fw_cb_dma;
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	uint32_t rem = len;
+	struct nlattr *attr;
+
+	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+					sizeof(struct addr_ctrl_blk),
+					&init_fw_cb_dma, GFP_KERNEL);
+	if (!init_fw_cb) {
+		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
+			   __func__);
+		return -ENOMEM;
+	}
+
+	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+	memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
+		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
+		rval = -EIO;
+		goto exit_init_fw_cb;
+	}
+
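+	/* Walk the netlink-packed attribute list and fold each iface
+	 * parameter into the init_fw_cb just read back from the firmware.
+	 */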
+	nla_for_each_attr(attr, data, len, rem) {
+		iface_param = nla_data(attr);
+
+		if (iface_param->param_type == ISCSI_NET_PARAM) {
+			switch (iface_param->iface_type) {
+			case ISCSI_IFACE_TYPE_IPV4:
+				switch (iface_param->iface_num) {
+				case 0:
+					qla4xxx_set_ipv4(ha, iface_param,
+							 init_fw_cb);
+					break;
+				default:
+					/* Cannot have more than one IPv4 interface */
+					ql4_printk(KERN_ERR, ha,
+						   "Invalid IPv4 iface number = %d\n",
+						   iface_param->iface_num);
+					break;
+				}
+				break;
+			case ISCSI_IFACE_TYPE_IPV6:
+				switch (iface_param->iface_num) {
+				case 0:
+				case 1:
+					qla4xxx_set_ipv6(ha, iface_param,
+							 init_fw_cb);
+					break;
+				default:
+					/* Cannot have more than two IPv6 interfaces */
+					ql4_printk(KERN_ERR, ha,
+						   "Invalid IPv6 iface number = %d\n",
+						   iface_param->iface_num);
+					break;
+				}
+				break;
+			default:
+				ql4_printk(KERN_ERR, ha,
+					   "Invalid iface type\n");
+				break;
+			}
+		} else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
+			qla4xxx_set_iscsi_param(ha, iface_param,
+						init_fw_cb);
+		}
+	}
+
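+	/* Stamp the block with the IFCB validity cookie before committing
+	 * it to flash and re-enabling the ACB.
+	 */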
+	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
+
+	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
+				 sizeof(struct addr_ctrl_blk),
+				 FLASH_OPT_RMW_COMMIT);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
+			   __func__);
+		rval = -EIO;
+		goto exit_init_fw_cb;
+	}
+
+	rval = qla4xxx_disable_acb(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+			   __func__);
+		rval = -EIO;
+		goto exit_init_fw_cb;
+	}
+
+	wait_for_completion_timeout(&ha->disable_acb_comp,
+				    DISABLE_ACB_TOV * HZ);
+
+	qla4xxx_initcb_to_acb(init_fw_cb);
+
+	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
+			   __func__);
+		rval = -EIO;
+		goto exit_init_fw_cb;
+	}
+
+	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
+	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
+				  init_fw_cb_dma);
+
+exit_init_fw_cb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
+			  init_fw_cb, init_fw_cb_dma);
+
+	return rval;
+}
+
+static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
+				     enum iscsi_param param, char *buf)
+{
+	struct iscsi_session *sess = cls_sess->dd_data;
+	struct ddb_entry *ddb_entry = sess->dd_data;
+	struct scsi_qla_host *ha = ddb_entry->ha;
+	struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
+	struct ql4_chap_table chap_tbl;
+	int rval, len;
+	uint16_t idx;
+
+	memset(&chap_tbl, 0, sizeof(chap_tbl));
+	switch (param) {
+	case ISCSI_PARAM_CHAP_IN_IDX:
+		rval = qla4xxx_get_chap_index(ha, sess->username_in,
+					      sess->password_in, BIDI_CHAP,
+					      &idx);
+		if (rval)
+			len = sprintf(buf, "\n");
+		else
+			len = sprintf(buf, "%hu\n", idx);
+		break;
+	case ISCSI_PARAM_CHAP_OUT_IDX:
+		if (ddb_entry->ddb_type == FLASH_DDB) {
+			if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+				idx = ddb_entry->chap_tbl_idx;
+				rval = QLA_SUCCESS;
+			} else {
+				rval = QLA_ERROR;
+			}
+		} else {
+			rval = qla4xxx_get_chap_index(ha, sess->username,
+						      sess->password,
+						      LOCAL_CHAP, &idx);
+		}
+		if (rval)
+			len = sprintf(buf, "\n");
+		else
+			len = sprintf(buf, "%hu\n", idx);
+		break;
+	case ISCSI_PARAM_USERNAME:
+	case ISCSI_PARAM_PASSWORD:
+		/* First, populate session username and password for FLASH DDB,
+		 * if not already done. This happens when session login fails
+		 * for a FLASH DDB.
+		 */
+		if (ddb_entry->ddb_type == FLASH_DDB &&
+		    ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
+		    !sess->username && !sess->password) {
+			idx = ddb_entry->chap_tbl_idx;
+			rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+							    chap_tbl.secret,
+							    idx);
+			if (!rval) {
+				iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+						(char *)chap_tbl.name,
+						strlen((char *)chap_tbl.name));
+				iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+						(char *)chap_tbl.secret,
+						chap_tbl.secret_len);
+			}
+		}
+		/* allow fall-through */
+	default:
+		return iscsi_session_get_param(cls_sess, param, buf);
+	}
+
+	return len;
+}
+
+static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				  enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn;
+	struct qla_conn *qla_conn;
+	struct sockaddr *dst_addr;
+
+	conn = cls_conn->dd_data;
+	qla_conn = conn->dd_data;
+	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+	case ISCSI_PARAM_CONN_ADDRESS:
+		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+						 dst_addr, param, buf);
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+}
+
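+/* Find a free DDB index: claim the first clear bit in the driver's
+ * ddb_idx_map, then ask the firmware to reserve that entry; if the
+ * firmware rejects the index, retry with the next free one.
+ */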
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+	uint32_t mbx_sts = 0;
+	uint16_t tmp_ddb_index;
+	int ret;
+
+get_ddb_index:
+	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Free DDB index not available\n"));
+		ret = QLA_ERROR;
+		goto exit_get_ddb_index;
+	}
+
+	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+		goto get_ddb_index;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Found a free DDB index at %d\n", tmp_ddb_index));
+	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+	if (ret == QLA_ERROR) {
+		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+			ql4_printk(KERN_INFO, ha,
+				   "DDB index = %d not available, trying next\n",
+				   tmp_ddb_index);
+			goto get_ddb_index;
+		}
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Free FW DDB not available\n"));
+	}
+
+	*ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+	return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+				   struct ddb_entry *ddb_entry,
+				   char *existing_ipaddr,
+				   char *user_ipaddr)
+{
+	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+	char formatted_ipaddr[DDB_IPADDR_LEN];
+	int status = QLA_SUCCESS, ret = 0;
+
+	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+			       '\0', NULL);
+		if (ret == 0) {
+			status = QLA_ERROR;
+			goto out_match;
+		}
+		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+	} else {
+		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+			       '\0', NULL);
+		if (ret == 0) {
+			status = QLA_ERROR;
+			goto out_match;
+		}
+		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+	}
+
+	if (strcmp(existing_ipaddr, formatted_ipaddr))
+		status = QLA_ERROR;
+
+out_match:
+	return status;
+}
+
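+/* Check whether a session to the same target IQN, portal address and
+ * port already exists among the flash (FW) DDB entries, so callers can
+ * avoid opening a duplicate connection.
+ */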
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+				      struct iscsi_cls_conn *cls_conn)
+{
+	int idx = 0, max_ddbs, rval;
+	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+	struct iscsi_session *sess, *existing_sess;
+	struct iscsi_conn *conn, *existing_conn;
+	struct ddb_entry *ddb_entry;
+
+	sess = cls_sess->dd_data;
+	conn = cls_conn->dd_data;
+
+	if (sess->targetname == NULL ||
+	    conn->persistent_address == NULL ||
+	    conn->persistent_port == 0)
+		return QLA_ERROR;
+
+	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				    MAX_DEV_DB_ENTRIES;
+
+	for (idx = 0; idx < max_ddbs; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		if (ddb_entry->ddb_type != FLASH_DDB)
+			continue;
+
+		existing_sess = ddb_entry->sess->dd_data;
+		existing_conn = ddb_entry->conn->dd_data;
+
+		if (existing_sess->targetname == NULL ||
+		    existing_conn->persistent_address == NULL ||
+		    existing_conn->persistent_port == 0)
+			continue;
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "IQN = %s User IQN = %s\n",
+				  existing_sess->targetname,
+				  sess->targetname));
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "IP = %s User IP = %s\n",
+				  existing_conn->persistent_address,
+				  conn->persistent_address));
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Port = %d User Port = %d\n",
+				  existing_conn->persistent_port,
+				  conn->persistent_port));
+
+		if (strcmp(existing_sess->targetname, sess->targetname))
+			continue;
+		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+					existing_conn->persistent_address,
+					conn->persistent_address);
+		if (rval == QLA_ERROR)
+			continue;
+		if (existing_conn->persistent_port != conn->persistent_port)
+			continue;
+		break;
+	}
+
+	if (idx == max_ddbs)
+		return QLA_ERROR;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Match found in fwdb sessions\n"));
+	return QLA_SUCCESS;
+}
+
+static struct iscsi_cls_session *
+qla4xxx_session_create(struct iscsi_endpoint *ep,
+			uint16_t cmds_max, uint16_t qdepth,
+			uint32_t initial_cmdsn)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct scsi_qla_host *ha;
+	struct qla_endpoint *qla_ep;
+	struct ddb_entry *ddb_entry;
+	uint16_t ddb_index;
+	struct iscsi_session *sess;
+	struct sockaddr *dst_addr;
+	int ret;
+
+	if (!ep) {
+		printk(KERN_ERR "qla4xxx: missing ep.\n");
+		return NULL;
+	}
+
+	qla_ep = ep->dd_data;
+	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+	ha = to_qla_host(qla_ep->host);
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
+
+	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+	if (ret == QLA_ERROR)
+		return NULL;
+
+	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
+				       cmds_max, sizeof(struct ddb_entry),
+				       sizeof(struct ql4_task_data),
+				       initial_cmdsn, ddb_index);
+	if (!cls_sess)
+		return NULL;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ddb_entry->fw_ddb_index = ddb_index;
+	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+	ddb_entry->ha = ha;
+	ddb_entry->sess = cls_sess;
+	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+	ddb_entry->ddb_change = qla4xxx_ddb_change;
+	clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
+	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+	ha->tot_ddbs++;
+
+	return cls_sess;
+}
+
+static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	unsigned long flags, wtime;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t ddb_state;
+	int ret;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
+			  ha->host_no));
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto destroy_session;
+	}
+
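+	/* Poll the firmware DDB state for up to LOGOUT_TOV seconds, waiting
+	 * for the connection to drop before freeing the DDB.
+	 */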
+	wtime = jiffies + (HZ * LOGOUT_TOV);
+	do {
+		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+					      fw_ddb_entry, fw_ddb_entry_dma,
+					      NULL, NULL, &ddb_state, NULL,
+					      NULL, NULL);
+		if (ret == QLA_ERROR)
+			goto destroy_session;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto destroy_session;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while (time_after(wtime, jiffies));
+
+destroy_session:
+	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
+		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qla4xxx_free_ddb(ha, ddb_entry);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	iscsi_session_teardown(cls_sess);
+
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+static struct iscsi_cls_conn *
+qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
+{
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
+				    conn_idx);
+	if (!cls_conn) {
+		pr_info("%s: Cannot create connection for conn_idx = %u\n",
+			__func__, conn_idx);
+		return NULL;
+	}
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ddb_entry->conn = cls_conn;
+
+	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
+			  conn_idx));
+	return cls_conn;
+}
+
+static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
+			     struct iscsi_cls_conn *cls_conn,
+			     uint64_t transport_fd, int is_leading)
+{
+	struct iscsi_conn *conn;
+	struct qla_conn *qla_conn;
+	struct iscsi_endpoint *ep;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct iscsi_session *sess;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+			  cls_session->sid, cls_conn->cid));
+
+	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+		return -EINVAL;
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+	conn = cls_conn->dd_data;
+	qla_conn = conn->dd_data;
+	qla_conn->qla_ep = ep->dd_data;
+	return 0;
+}
+
+static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t mbx_sts = 0;
+	int ret = 0;
+	int status = QLA_SUCCESS;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
+			  cls_sess->sid, cls_conn->cid));
+
+	/* Check if we have a matching FW DDB; if so, do not log in to this
+	 * target, since that could cause the target to log out the previous
+	 * connection.
+	 */
+	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+	if (ret == QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha,
+			   "Session already exists in FW.\n");
+		ret = -EEXIST;
+		goto exit_conn_start;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		ret = -ENOMEM;
+		goto exit_conn_start;
+	}
+
+	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
+	if (ret) {
+		/* If iscsid is stopped and restarted, there is no need to
+		 * set the params again, since the DDB state will already be
+		 * active and the FW does not allow set ddb on an active
+		 * session.
+		 */
+		if (mbx_sts)
+			if (ddb_entry->fw_ddb_device_state ==
+						DDB_DS_SESSION_ACTIVE) {
+				ddb_entry->unblock_sess(ddb_entry->sess);
+				goto exit_set_param;
+			}
+
+		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
+			   __func__, ddb_entry->fw_ddb_index);
+		goto exit_conn_start;
+	}
+
+	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+	if (status == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+			   sess->targetname);
+		ret = -EINVAL;
+		goto exit_conn_start;
+	}
+
+	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
+		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+
+	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
+		      ddb_entry->fw_ddb_device_state));
+
+exit_set_param:
+	ret = 0;
+
+exit_conn_start:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret;
+}
+
+static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+	struct iscsi_session *sess;
+	struct scsi_qla_host *ha;
+	struct ddb_entry *ddb_entry;
+	int options;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
+			  cls_conn->cid));
+
+	options = LOGOUT_OPTION_CLOSE_SESSION;
+	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
+		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+}
+
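+/* Deferred completion for PASSTHRU0 requests: on success, restore the
+ * task's itt in the response header and hand the PDU back to libiscsi.
+ */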
+static void qla4xxx_task_work(struct work_struct *wdata)
+{
+	struct ql4_task_data *task_data;
+	struct scsi_qla_host *ha;
+	struct passthru_status *sts;
+	struct iscsi_task *task;
+	struct iscsi_hdr *hdr;
+	uint8_t *data;
+	uint32_t data_len;
+	struct iscsi_conn *conn;
+	int hdr_len;
+	itt_t itt;
+
+	task_data = container_of(wdata, struct ql4_task_data, task_work);
+	ha = task_data->ha;
+	task = task_data->task;
+	sts = &task_data->sts;
+	hdr_len = sizeof(struct iscsi_hdr);
+
+	DEBUG3(printk(KERN_INFO "Status returned\n"));
+	DEBUG3(qla4xxx_dump_buffer(sts, 64));
+	DEBUG3(printk(KERN_INFO "Response buffer\n"));
+	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
+
+	conn = task->conn;
+
+	switch (sts->completionStatus) {
+	case PASSTHRU_STATUS_COMPLETE:
+		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
+		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
+		itt = sts->handle;
+		hdr->itt = itt;
+		data = task_data->resp_buffer + hdr_len;
+		data_len = task_data->resp_len - hdr_len;
+		iscsi_complete_pdu(conn, hdr, data, data_len);
+		break;
+	default:
+		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
+			   sts->completionStatus);
+		break;
+	}
+}
+
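+/* Allocate the per-task DMA buffers for a passthrough PDU: the request
+ * buffer holds the iSCSI header plus any immediate data, the response
+ * buffer a header plus the connection's MaxRecvDataSegmentLength.
+ */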
+static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
+{
+	struct ql4_task_data *task_data;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	int hdr_len;
+
+	sess = task->conn->session;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	task_data = task->dd_data;
+	memset(task_data, 0, sizeof(struct ql4_task_data));
+
+	if (task->sc) {
+		ql4_printk(KERN_INFO, ha,
+			   "%s: SCSI Commands not implemented\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr_len = sizeof(struct iscsi_hdr);
+	task_data->ha = ha;
+	task_data->task = task;
+
+	if (task->data_count) {
+		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
+						     task->data_count,
+						     PCI_DMA_TODEVICE);
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
+			  __func__, task->conn->max_recv_dlength, hdr_len));
+
+	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
+	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
+						    task_data->resp_len,
+						    &task_data->resp_dma,
+						    GFP_ATOMIC);
+	if (!task_data->resp_buffer)
+		goto exit_alloc_pdu;
+
+	task_data->req_len = task->data_count + hdr_len;
+	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
+						   task_data->req_len,
+						   &task_data->req_dma,
+						   GFP_ATOMIC);
+	if (!task_data->req_buffer)
+		goto exit_alloc_pdu;
+
+	task->hdr = task_data->req_buffer;
+
+	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
+
+	return 0;
+
+exit_alloc_pdu:
+	if (task_data->resp_buffer)
+		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
+				  task_data->resp_buffer, task_data->resp_dma);
+
+	if (task_data->req_buffer)
+		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
+				  task_data->req_buffer, task_data->req_dma);
+	return -ENOMEM;
+}
+
+static void qla4xxx_task_cleanup(struct iscsi_task *task)
+{
+	struct ql4_task_data *task_data;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	int hdr_len;
+
+	hdr_len = sizeof(struct iscsi_hdr);
+	sess = task->conn->session;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	task_data = task->dd_data;
+
+	if (task->data_count) {
+		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
+				 task->data_count, PCI_DMA_TODEVICE);
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
+			  __func__, task->conn->max_recv_dlength, hdr_len));
+
+	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
+			  task_data->resp_buffer, task_data->resp_dma);
+	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
+			  task_data->req_buffer, task_data->req_dma);
+}
+
+static int qla4xxx_task_xmit(struct iscsi_task *task)
+{
+	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_session *sess = task->conn->session;
+	struct ddb_entry *ddb_entry = sess->dd_data;
+	struct scsi_qla_host *ha = ddb_entry->ha;
+
+	if (!sc)
+		return qla4xxx_send_passthru0(task);
+
+	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
+		   __func__);
+	return -ENOSYS;
+}
+
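+/* Translate a firmware flash DDB entry into the iscsi_bus_flash_session/
+ * iscsi_bus_flash_conn fields exposed through the transport sysfs tree.
+ */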
+static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
+					 struct iscsi_bus_flash_conn *conn,
+					 struct dev_db_entry *fw_ddb_entry)
+{
+	unsigned long options = 0;
+	int rc = 0;
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+	if (test_bit(OPT_IPV6_DEVICE, &options)) {
+		rc = iscsi_switch_str_param(&sess->portal_type,
+					    PORTAL_TYPE_IPV6);
+		if (rc)
+			goto exit_copy;
+	} else {
+		rc = iscsi_switch_str_param(&sess->portal_type,
+					    PORTAL_TYPE_IPV4);
+		if (rc)
+			goto exit_copy;
+	}
+
+	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+					      &options);
+	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
+
+	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+					    &options);
+	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
+	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+					     &options);
+	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+	sess->discovery_auth_optional =
+			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+	if (test_bit(ISCSIOPT_ERL1, &options))
+		sess->erl |= BIT_1;
+	if (test_bit(ISCSIOPT_ERL0, &options))
+		sess->erl |= BIT_0;
+
+	options = le16_to_cpu(fw_ddb_entry->tcp_options);
+	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+		conn->tcp_timer_scale |= BIT_3;
+	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+		conn->tcp_timer_scale |= BIT_2;
+	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+		conn->tcp_timer_scale |= BIT_1;
+
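+	/* The timer scale lives in bits 3:1 of tcp_options; shift the
+	 * collected bits down so tcp_timer_scale holds the plain 0-7 value.
+	 */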
+	conn->tcp_timer_scale >>= 1;
+	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+	options = le16_to_cpu(fw_ddb_entry->ip_options);
+	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+	conn->max_recv_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+	conn->max_xmit_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+	sess->first_burst = BYTE_UNITS *
+			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+	sess->max_burst = BYTE_UNITS *
+				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
+	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
+	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
+	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
+	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+
+	sess->default_taskmgmt_timeout =
+				le16_to_cpu(fw_ddb_entry->def_timeout);
+	conn->port = le16_to_cpu(fw_ddb_entry->port);
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+	if (!conn->ipaddress) {
+		rc = -ENOMEM;
+		goto exit_copy;
+	}
+
+	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+	if (!conn->redirect_ipaddr) {
+		rc = -ENOMEM;
+		goto exit_copy;
+	}
+
+	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
+
+	if (test_bit(OPT_IPV6_DEVICE, &options)) {
+		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
+
+		conn->link_local_ipv6_addr = kmemdup(
+					fw_ddb_entry->link_local_ipv6_addr,
+					IPv6_ADDR_LEN, GFP_KERNEL);
+		if (!conn->link_local_ipv6_addr) {
+			rc = -ENOMEM;
+			goto exit_copy;
+		}
+	} else {
+		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+	}
+
+	if (fw_ddb_entry->iscsi_name[0]) {
+		rc = iscsi_switch_str_param(&sess->targetname,
+					    (char *)fw_ddb_entry->iscsi_name);
+		if (rc)
+			goto exit_copy;
+	}
+
+	if (fw_ddb_entry->iscsi_alias[0]) {
+		rc = iscsi_switch_str_param(&sess->targetalias,
+					    (char *)fw_ddb_entry->iscsi_alias);
+		if (rc)
+			goto exit_copy;
+	}
+
+	COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+exit_copy:
+	return rc;
+}
+
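+/* Reverse of the above: pack the sysfs-visible flash session/conn fields
+ * back into a firmware dev_db_entry, preserving unrelated option bits.
+ */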
+static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
+				       struct iscsi_bus_flash_conn *conn,
+				       struct dev_db_entry *fw_ddb_entry)
+{
+	uint16_t options;
+	int rc = 0;
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	SET_BITVAL(conn->is_fw_assigned_ipv6,  options, BIT_11);
+	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		options |= BIT_8;
+	else
+		options &= ~BIT_8;
+
+	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
+	SET_BITVAL(sess->discovery_sess, options, BIT_4);
+	SET_BITVAL(sess->entry_state, options, BIT_3);
+	fw_ddb_entry->options = cpu_to_le16(options);
+
+	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
+	SET_BITVAL(conn->datadgst_en, options, BIT_12);
+	SET_BITVAL(sess->imm_data_en, options, BIT_11);
+	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
+	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
+	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
+	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
+	SET_BITVAL(conn->snack_req_en, options, BIT_6);
+	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
+	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
+	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
+	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
+	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
+	fw_ddb_entry->iscsi_options = cpu_to_le16(options);
+
+	options = le16_to_cpu(fw_ddb_entry->tcp_options);
+	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
+	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
+	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
+	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
+	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
+	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
+	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
+	fw_ddb_entry->tcp_options = cpu_to_le16(options);
+
+	options = le16_to_cpu(fw_ddb_entry->ip_options);
+	SET_BITVAL(conn->fragment_disable, options, BIT_4);
+	fw_ddb_entry->ip_options = cpu_to_le16(options);
+
+	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
+	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
+	fw_ddb_entry->iscsi_max_snd_data_seg_len =
+			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
+	fw_ddb_entry->iscsi_first_burst_len =
+				cpu_to_le16(sess->first_burst / BYTE_UNITS);
+	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
+					    BYTE_UNITS);
+	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
+	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
+	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
+	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
+	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
+	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
+	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
+	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
+	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
+	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
+	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
+	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
+	fw_ddb_entry->port = cpu_to_le16(conn->port);
+	fw_ddb_entry->def_timeout =
+				cpu_to_le16(sess->default_taskmgmt_timeout);
+
+	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
+	else
+		fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
+
+	if (conn->ipaddress)
+		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
+		       sizeof(fw_ddb_entry->ip_addr));
+
+	if (conn->redirect_ipaddr)
+		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
+		       sizeof(fw_ddb_entry->tgt_addr));
+
+	if (conn->link_local_ipv6_addr)
+		memcpy(fw_ddb_entry->link_local_ipv6_addr,
+		       conn->link_local_ipv6_addr,
+		       sizeof(fw_ddb_entry->link_local_ipv6_addr));
+
+	if (sess->targetname)
+		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+		       sizeof(fw_ddb_entry->iscsi_name));
+
+	if (sess->targetalias)
+		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
+		       sizeof(fw_ddb_entry->iscsi_alias));
+
+	COPY_ISID(fw_ddb_entry->isid, sess->isid);
+
+	return rc;
+}
+
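+/* Populate a live libiscsi session/connection from a firmware
+ * dev_db_entry; used by both the flash-DDB copy path and the post-login
+ * parameter update below.
+ */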
+static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
+					     struct iscsi_session *sess,
+					     struct dev_db_entry *fw_ddb_entry)
+{
+	unsigned long options = 0;
+	uint16_t ddb_link;
+	uint16_t disc_parent;
+	char ip_addr[DDB_IPADDR_LEN];
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+					      &options);
+	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+
+	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+					    &options);
+	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+					     &options);
+	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+	sess->discovery_auth_optional =
+			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+	if (test_bit(ISCSIOPT_ERL1, &options))
+		sess->erl |= BIT_1;
+	if (test_bit(ISCSIOPT_ERL0, &options))
+		sess->erl |= BIT_0;
+
+	options = le16_to_cpu(fw_ddb_entry->tcp_options);
+	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+		conn->tcp_timer_scale |= BIT_3;
+	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+		conn->tcp_timer_scale |= BIT_2;
+	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+		conn->tcp_timer_scale |= BIT_1;
+
+	conn->tcp_timer_scale >>= 1;
+	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+	options = le16_to_cpu(fw_ddb_entry->ip_options);
+	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+	conn->max_recv_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+	conn->max_xmit_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+	sess->first_burst = BYTE_UNITS *
+			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+	sess->max_burst = BYTE_UNITS *
+				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+	conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+	conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
+	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+	COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+	if (ddb_link == DDB_ISNS)
+		disc_parent = ISCSI_DISC_PARENT_ISNS;
+	else if (ddb_link == DDB_NO_LINK)
+		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+	else if (ddb_link < MAX_DDB_ENTRIES)
+		disc_parent = ISCSI_DISC_PARENT_SENDTGT;
+	else
+		disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+
+	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
+			iscsi_get_discovery_parent_name(disc_parent), 0);
+
+	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+			(char *)fw_ddb_entry->iscsi_alias, 0);
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE) {
+		memset(ip_addr, 0, sizeof(ip_addr));
+		sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
+		iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
+				(char *)ip_addr, 0);
+	}
+}
+
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+				     struct dev_db_entry *fw_ddb_entry,
+				     struct iscsi_cls_session *cls_sess,
+				     struct iscsi_cls_conn *cls_conn)
+{
+	int buflen = 0;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct ql4_chap_table chap_tbl;
+	struct iscsi_conn *conn;
+	char ip_addr[DDB_IPADDR_LEN];
+	uint16_t options = 0;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	conn = cls_conn->dd_data;
+	memset(&chap_tbl, 0, sizeof(chap_tbl));
+
+	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
+	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
+
+	sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
+	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+	memset(ip_addr, 0, sizeof(ip_addr));
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE) {
+		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
+
+		memset(ip_addr, 0, sizeof(ip_addr));
+		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+	} else {
+		iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
+		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+	}
+
+	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+			(char *)ip_addr, buflen);
+	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+			(char *)fw_ddb_entry->iscsi_name, buflen);
+	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+			(char *)ha->name_string, buflen);
+
+	if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+		if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+						   chap_tbl.secret,
+						   ddb_entry->chap_tbl_idx)) {
+			iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+					(char *)chap_tbl.name,
+					strlen((char *)chap_tbl.name));
+			iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+					(char *)chap_tbl.secret,
+					chap_tbl.secret_len);
+		}
+	}
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+					     struct ddb_entry *ddb_entry)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_cls_conn *cls_conn;
+	uint32_t ddb_state;
+	dma_addr_t fw_ddb_entry_dma;
+	struct dev_db_entry *fw_ddb_entry;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto exit_session_conn_fwddb_param;
+	}
+
+	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+				    NULL, NULL, NULL) == QLA_ERROR) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+				  "get_ddb_entry for fw_ddb_index %d\n",
+				  ha->host_no, __func__,
+				  ddb_entry->fw_ddb_index));
+		goto exit_session_conn_fwddb_param;
+	}
+
+	cls_sess = ddb_entry->sess;
+
+	cls_conn = ddb_entry->conn;
+
+	/* Update params */
+	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+				       struct ddb_entry *ddb_entry)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	uint32_t ddb_state;
+	dma_addr_t fw_ddb_entry_dma;
+	struct dev_db_entry *fw_ddb_entry;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto exit_session_conn_param;
+	}
+
+	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+				    NULL, NULL, NULL) == QLA_ERROR) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+				  "get_ddb_entry for fw_ddb_index %d\n",
+				  ha->host_no, __func__,
+				  ddb_entry->fw_ddb_index));
+		goto exit_session_conn_param;
+	}
+
+	cls_sess = ddb_entry->sess;
+	sess = cls_sess->dd_data;
+
+	cls_conn = ddb_entry->conn;
+	conn = cls_conn->dd_data;
+
+	/* Update timers after login */
+	ddb_entry->default_relogin_timeout =
+		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
+	ddb_entry->default_time2wait =
+				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+	/* Update params */
+	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+	qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
+
+	memcpy(sess->initiatorname, ha->name_string,
+	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/*
+ * Timer routines
+ */
+
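+/* Note: v4.14 still uses the legacy init_timer()/timer.data interface;
+ * later kernels replace this pattern with timer_setup().
+ */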
+static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
+				unsigned long interval)
+{
+	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
+		     __func__, ha->host->host_no));
+	init_timer(&ha->timer);
+	ha->timer.expires = jiffies + interval * HZ;
+	ha->timer.data = (unsigned long)ha;
+	ha->timer.function = (void (*)(unsigned long))func;
+	add_timer(&ha->timer);
+	ha->timer_active = 1;
+}
+
+static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
+{
+	del_timer_sync(&ha->timer);
+	ha->timer_active = 0;
+}
+
+/**
+ * qla4xxx_mark_device_missing - blocks the session
+ * @cls_session: Pointer to the session to be blocked
+ *
+ * This routine marks a device missing by blocking its iSCSI session.
+ **/
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+	iscsi_block_session(cls_session);
+}
+
+/**
+ * qla4xxx_mark_all_devices_missing - mark all devices as missing.
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine marks every device on the adapter missing by blocking
+ * each of its sessions.
+ **/
+void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
+{
+	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
+}
+
+static struct srb *qla4xxx_get_new_srb(struct scsi_qla_host *ha,
+				       struct ddb_entry *ddb_entry,
+				       struct scsi_cmnd *cmd)
+{
+	struct srb *srb;
+
+	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
+	if (!srb)
+		return srb;
+
+	kref_init(&srb->srb_ref);
+	srb->ha = ha;
+	srb->ddb = ddb_entry;
+	srb->cmd = cmd;
+	srb->flags = 0;
+	CMD_SP(cmd) = (void *)srb;
+
+	return srb;
+}
+
+static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
+{
+	struct scsi_cmnd *cmd = srb->cmd;
+
+	if (srb->flags & SRB_DMA_VALID) {
+		scsi_dma_unmap(cmd);
+		srb->flags &= ~SRB_DMA_VALID;
+	}
+	CMD_SP(cmd) = NULL;
+}
+
+void qla4xxx_srb_compl(struct kref *ref)
+{
+	struct srb *srb = container_of(ref, struct srb, srb_ref);
+	struct scsi_cmnd *cmd = srb->cmd;
+	struct scsi_qla_host *ha = srb->ha;
+
+	qla4xxx_srb_free_dma(ha, srb);
+
+	mempool_free(srb, ha->srb_mempool);
+
+	cmd->scsi_done(cmd);
+}
+
+/**
+ * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
+ * @host: scsi host
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * Remarks:
+ * This routine is invoked by Linux to send a SCSI command to the driver.
+ * The mid-level driver tries to ensure that queuecommand never gets
+ * invoked concurrently with itself or the interrupt handler (although
+ * the interrupt handler may call this routine as part of request-
+ * completion handling). Unfortunately, it sometimes calls the scheduler
+ * in interrupt context, which is a big NO! NO!.
+ **/
+static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct ddb_entry *ddb_entry = cmd->device->hostdata;
+	struct iscsi_cls_session *sess = ddb_entry->sess;
+	struct srb *srb;
+	int rval;
+
+	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
+			cmd->result = DID_NO_CONNECT << 16;
+		else
+			cmd->result = DID_REQUEUE << 16;
+		goto qc_fail_command;
+	}
+
+	if (!sess) {
+		cmd->result = DID_IMM_RETRY << 16;
+		goto qc_fail_command;
+	}
+
+	rval = iscsi_session_chkready(sess);
+	if (rval) {
+		cmd->result = rval;
+		goto qc_fail_command;
+	}
+
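+	/* Push back on the midlayer while the adapter is resetting, offline,
+	 * link-down, in loopback, or otherwise unable to take new I/O.
+	 */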
+	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
+	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+	    !test_bit(AF_ONLINE, &ha->flags) ||
+	    !test_bit(AF_LINK_UP, &ha->flags) ||
+	    test_bit(AF_LOOPBACK, &ha->flags) ||
+	    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
+	    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
+		goto qc_host_busy;
+
+	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
+	if (!srb)
+		goto qc_host_busy;
+
+	rval = qla4xxx_send_command_to_isp(ha, srb);
+	if (rval != QLA_SUCCESS)
+		goto qc_host_busy_free_sp;
+
+	return 0;
+
+qc_host_busy_free_sp:
+	qla4xxx_srb_free_dma(ha, srb);
+	mempool_free(srb, ha->srb_mempool);
+
+qc_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+qc_fail_command:
+	cmd->scsi_done(cmd);
+
+	return 0;
+}
+
+/**
+ * qla4xxx_mem_free - frees memory allocated to adapter
+ * @ha: Pointer to host adapter structure.
+ *
+ * Frees memory previously allocated by qla4xxx_mem_alloc
+ **/
+static void qla4xxx_mem_free(struct scsi_qla_host *ha)
+{
+	if (ha->queues)
+		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
+				  ha->queues_dma);
+
+	vfree(ha->fw_dump);
+
+	ha->queues_len = 0;
+	ha->queues = NULL;
+	ha->queues_dma = 0;
+	ha->request_ring = NULL;
+	ha->request_dma = 0;
+	ha->response_ring = NULL;
+	ha->response_dma = 0;
+	ha->shadow_regs = NULL;
+	ha->shadow_regs_dma = 0;
+	ha->fw_dump = NULL;
+	ha->fw_dump_size = 0;
+
+	/* Free srb pool. */
+	mempool_destroy(ha->srb_mempool);
+
+	ha->srb_mempool = NULL;
+
+	dma_pool_destroy(ha->chap_dma_pool);
+
+	vfree(ha->chap_list);
+	ha->chap_list = NULL;
+
+	dma_pool_destroy(ha->fw_ddb_dma_pool);
+
+	/* release io space registers  */
+	if (is_qla8022(ha)) {
+		if (ha->nx_pcibase)
+			iounmap(
+			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		if (ha->nx_pcibase)
+			iounmap(
+			    (struct device_reg_83xx __iomem *)ha->nx_pcibase);
+	} else if (ha->reg) {
+		iounmap(ha->reg);
+	}
+
+	vfree(ha->reset_tmplt.buff);
+
+	pci_release_regions(ha->pdev);
+}
+
+/**
+ * qla4xxx_mem_alloc - allocates memory for use by adapter.
+ * @ha: Pointer to host adapter structure
+ *
+ * Allocates DMA memory for request and response queues. Also allocates memory
+ * for srbs.
+ **/
+static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
+{
+	unsigned long align;
+
+	/* Allocate contiguous block of DMA memory for queues. */
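+	/*
+	 * Layout: [align pad][request ring][response ring][shadow regs],
+	 * rounded up to a whole page.  MEM_ALIGN_VALUE extra bytes are
+	 * included so the request ring can be shifted onto an aligned
+	 * boundary (see the align fixup below).
+	 */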
+	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
+			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
+			  sizeof(struct shadow_regs) +
+			  MEM_ALIGN_VALUE +
+			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+					&ha->queues_dma, GFP_KERNEL);
+	if (ha->queues == NULL) {
+		ql4_printk(KERN_WARNING, ha,
+		    "Memory Allocation failed - queues.\n");
+
+		goto mem_alloc_error_exit;
+	}
+	memset(ha->queues, 0, ha->queues_len);
+
+	/*
+	 * As per RISC alignment requirements -- the bus-address must be a
+	 * multiple of the request-ring size (in bytes).
+	 */
+	align = 0;
+	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
+		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
+					   (MEM_ALIGN_VALUE - 1));
+
+	/* Update request and response queue pointers. */
+	ha->request_dma = ha->queues_dma + align;
+	ha->request_ring = (struct queue_entry *) (ha->queues + align);
+	ha->response_dma = ha->queues_dma + align +
+		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
+	ha->response_ring = (struct queue_entry *) (ha->queues + align +
+						    (REQUEST_QUEUE_DEPTH *
+						     QUEUE_SIZE));
+	ha->shadow_regs_dma = ha->queues_dma + align +
+		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
+		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
+	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
+						  (REQUEST_QUEUE_DEPTH *
+						   QUEUE_SIZE) +
+						  (RESPONSE_QUEUE_DEPTH *
+						   QUEUE_SIZE));
+
+	/* Allocate memory for srb pool. */
+	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
+					 mempool_free_slab, srb_cachep);
+	if (ha->srb_mempool == NULL) {
+		ql4_printk(KERN_WARNING, ha,
+		    "Memory Allocation failed - SRB Pool.\n");
+
+		goto mem_alloc_error_exit;
+	}
+
+	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
+					    CHAP_DMA_BLOCK_SIZE, 8, 0);
+
+	if (ha->chap_dma_pool == NULL) {
+		ql4_printk(KERN_WARNING, ha,
+		    "%s: chap_dma_pool allocation failed..\n", __func__);
+		goto mem_alloc_error_exit;
+	}
+
+	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+					      DDB_DMA_BLOCK_SIZE, 8, 0);
+
+	if (ha->fw_ddb_dma_pool == NULL) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: fw_ddb_dma_pool allocation failed..\n",
+			   __func__);
+		goto mem_alloc_error_exit;
+	}
+
+	return QLA_SUCCESS;
+
+mem_alloc_error_exit:
+	return QLA_ERROR;
+}
+
+/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+	uint32_t temp, temp_state, temp_val;
+	int status = QLA_SUCCESS;
+
+	temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
+
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+			   " exceeds maximum allowed. Hardware has been shut"
+			   " down.\n", temp_val);
+		status = QLA_ERROR;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		if (ha->temperature == QLA82XX_TEMP_NORMAL)
+			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+				   " degrees C exceeds operating range."
+				   " Immediate action needed.\n", temp_val);
+	} else {
+		if (ha->temperature == QLA82XX_TEMP_WARN)
+			ql4_printk(KERN_INFO, ha, "Device temperature is"
+				   " now %d degrees C in normal range.\n",
+				   temp_val);
+	}
+	ha->temperature = temp_state;
+	return status;
+}
+
+/**
+ * qla4_8xxx_check_fw_alive  - Check firmware health
+ * @ha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+{
+	uint32_t fw_heartbeat_counter;
+	int status = QLA_SUCCESS;
+
+	fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
+						   QLA8XXX_PEG_ALIVE_COUNTER);
+	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+	if (fw_heartbeat_counter == 0xffffffff) {
+		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
+		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+		    ha->host_no, __func__));
+		return status;
+	}
+
+	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
+		ha->seconds_since_last_heartbeat++;
+		/* FW not alive after 2 seconds */
+		if (ha->seconds_since_last_heartbeat == 2) {
+			ha->seconds_since_last_heartbeat = 0;
+			qla4_8xxx_dump_peg_reg(ha);
+			status = QLA_ERROR;
+		}
+	} else
+		ha->seconds_since_last_heartbeat = 0;
+
+	ha->fw_heartbeat_counter = fw_heartbeat_counter;
+	return status;
+}
+
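+/**
+ * qla4_8xxx_process_fw_error - handle a firmware failure
+ * @ha: Pointer to host adapter structure.
+ *
+ * Reads PEG_HALT_STATUS1 to decide between a recoverable reset
+ * (DPC_RESET_HA) and an unrecoverable failure (DPC_HA_UNRECOVERABLE),
+ * then wakes the DPC thread to act on it.
+ **/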
+static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
+{
+	uint32_t halt_status;
+	int halt_status_unrecoverable = 0;
+
+	halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
+
+	if (is_qla8022(ha)) {
+		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+			   __func__);
+		qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+				CRB_NIU_XG_PAUSE_CTL_P0 |
+				CRB_NIU_XG_PAUSE_CTL_P1);
+
+		if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
+			ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
+				   __func__);
+		if (halt_status & HALT_STATUS_UNRECOVERABLE)
+			halt_status_unrecoverable = 1;
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
+			ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
+				   __func__);
+		else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
+			halt_status_unrecoverable = 1;
+	}
+
+	/*
+	 * Since we cannot change dev_state in interrupt context,
+	 * set appropriate DPC flag then wakeup DPC
+	 */
+	if (halt_status_unrecoverable) {
+		set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+	} else {
+		ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
+			   __func__);
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+	}
+	qla4xxx_mailbox_premature_completion(ha);
+	qla4xxx_wake_dpc(ha);
+}
+
+/**
+ * qla4_8xxx_watchdog - Poll dev state
+ * @ha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
+{
+	uint32_t dev_state;
+	uint32_t idc_ctrl;
+
+	if (is_qla8032(ha) &&
+	    (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
+		WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
+			  __func__, ha->func_num);
+
+	/* don't poll if reset is going on */
+	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
+		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
+
+		if (qla4_8xxx_check_temp(ha)) {
+			if (is_qla8022(ha)) {
+				ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
+				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+						CRB_NIU_XG_PAUSE_CTL_P0 |
+						CRB_NIU_XG_PAUSE_CTL_P1);
+			}
+			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+			qla4xxx_wake_dpc(ha);
+		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+
+			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
+				   __func__);
+
+			if (is_qla8032(ha) || is_qla8042(ha)) {
+				idc_ctrl = qla4_83xx_rd_reg(ha,
+							QLA83XX_IDC_DRV_CTRL);
+				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
+					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
+						   __func__);
+					qla4xxx_mailbox_premature_completion(
+									    ha);
+				}
+			}
+
+			if ((is_qla8032(ha) || is_qla8042(ha)) ||
+			    (is_qla8022(ha) && !ql4xdontresethba)) {
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
+				qla4xxx_wake_dpc(ha);
+			}
+		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
+			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
+			    __func__);
+			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
+			qla4xxx_wake_dpc(ha);
+		} else  {
+			/* Check firmware health */
+			if (qla4_8xxx_check_fw_alive(ha))
+				qla4_8xxx_process_fw_error(ha);
+		}
+	}
+}
+
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (ddb_entry->ddb_type != FLASH_DDB)
+		return;
+
+	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+	    !iscsi_is_session_online(cls_sess)) {
+		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+		    INVALID_ENTRY) {
+			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+					0) {
+				atomic_set(&ddb_entry->retry_relogin_timer,
+					   INVALID_ENTRY);
+				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+				set_bit(DF_RELOGIN, &ddb_entry->flags);
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+				       "%s: index [%d] login device\n",
+					__func__, ddb_entry->fw_ddb_index));
+			} else
+				atomic_dec(&ddb_entry->retry_relogin_timer);
+		}
+	}
+
+	/* Wait for the relogin to time out */
+	if (atomic_read(&ddb_entry->relogin_timer) &&
+	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+		/*
+		 * If the relogin times out and the device is
+		 * still NOT ONLINE then try and relogin again.
+		 */
+		if (!iscsi_is_session_online(cls_sess)) {
+			/* Reset retry relogin timer */
+			atomic_inc(&ddb_entry->relogin_retry_count);
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+				"%s: index[%d] relogin timed out-retrying"
+				" relogin (%d), retry (%d)\n", __func__,
+				ddb_entry->fw_ddb_index,
+				atomic_read(&ddb_entry->relogin_retry_count),
+				ddb_entry->default_time2wait + 4));
+			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+			atomic_set(&ddb_entry->retry_relogin_timer,
+				   ddb_entry->default_time2wait + 4);
+		}
+	}
+}
+
+/**
+ * qla4xxx_timer - checks every second for work to do.
+ * @ha: Pointer to host adapter structure.
+ **/
+static void qla4xxx_timer(struct scsi_qla_host *ha)
+{
+	int start_dpc = 0;
+	uint16_t w;
+
+	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
+	/* If we are in the middle of AER/EEH processing,
+	 * skip any processing and reschedule the timer.
+	 */
+	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+		mod_timer(&ha->timer, jiffies + HZ);
+		return;
+	}
+
+	/* Hardware read to trigger an EEH error during mailbox waits. */
+	if (!pci_channel_offline(ha->pdev))
+		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+
+	if (is_qla80XX(ha))
+		qla4_8xxx_watchdog(ha);
+
+	if (is_qla40XX(ha)) {
+		/* Check for heartbeat interval. */
+		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
+		    ha->heartbeat_interval != 0) {
+			ha->seconds_since_last_heartbeat++;
+			if (ha->seconds_since_last_heartbeat >
+			    ha->heartbeat_interval + 2)
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		}
+	}
+
+	/* Process any deferred work. */
+	if (!list_empty(&ha->work_list))
+		start_dpc++;
+
+	/* Wakeup the dpc routine for this adapter, if needed. */
+	if (start_dpc ||
+	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
+	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
+	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
+	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
+	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+	     test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
+	     test_bit(DPC_AEN, &ha->dpc_flags)) {
+		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
+			      " - dpc flags = 0x%lx\n",
+			      ha->host_no, __func__, ha->dpc_flags));
+		qla4xxx_wake_dpc(ha);
+	}
+
+	/* Reschedule timer thread to call us back in one second */
+	mod_timer(&ha->timer, jiffies + HZ);
+
+	DEBUG2(ha->seconds_since_last_intr++);
+}
+
+/**
+ * qla4xxx_cmd_wait - waits for all outstanding commands to complete
+ * @ha: Pointer to host adapter structure.
+ *
+ * This routine stalls the driver until all outstanding commands are returned.
+ * Caller must release the Hardware Lock prior to calling this routine.
+ **/
+static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
+{
+	uint32_t index = 0;
+	unsigned long flags;
+	struct scsi_cmnd *cmd;
+	unsigned long wtime;
+	uint32_t wtmo;
+
+	if (is_qla40XX(ha))
+		wtmo = WAIT_CMD_TOV;
+	else
+		wtmo = ha->nx_reset_timeout / 2;
+
+	wtime = jiffies + (wtmo * HZ);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Wait up to %u seconds for cmds to complete\n",
+			  wtmo));
+
+	while (!time_after_eq(jiffies, wtime)) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		/* Find a command that hasn't completed. */
+		for (index = 0; index < ha->host->can_queue; index++) {
+			cmd = scsi_host_find_tag(ha->host, index);
+			/*
+			 * We cannot just check if the index is valid,
+			 * because if we are run from the SCSI EH, then
+			 * the scsi/block layer is going to prevent
+			 * the tag from being released.
+			 */
+			if (cmd != NULL && CMD_SP(cmd))
+				break;
+		}
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		/* If No Commands are pending, wait is complete */
+		if (index == ha->host->can_queue)
+			return QLA_SUCCESS;
+
+		msleep(1000);
+	}
+	/* If we timed out waiting for the commands to complete,
+	 * return an error. */
+	return QLA_ERROR;
+}
+
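+/**
+ * qla4xxx_hw_reset - issues a soft reset to the adapter
+ * @ha: Pointer to host adapter structure.
+ *
+ * Clears any pending SCSI Reset Interrupt (which would otherwise keep
+ * the soft reset from working) and then writes CSR_SOFT_RESET to the
+ * control/status register.
+ **/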
+int qla4xxx_hw_reset(struct scsi_qla_host *ha)
+{
+	uint32_t ctrl_status;
+	unsigned long flags = 0;
+
+	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
+
+	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
+		return QLA_ERROR;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/*
+	 * If the SCSI Reset Interrupt bit is set, clear it.
+	 * Otherwise, the Soft Reset won't work.
+	 */
+	ctrl_status = readw(&ha->reg->ctrl_status);
+	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
+		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+
+	/* Issue Soft Reset */
+	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
+	readl(&ha->reg->ctrl_status);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_soft_reset - performs soft reset.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_soft_reset(struct scsi_qla_host *ha)
+{
+	uint32_t max_wait_time;
+	unsigned long flags = 0;
+	int status;
+	uint32_t ctrl_status;
+
+	status = qla4xxx_hw_reset(ha);
+	if (status != QLA_SUCCESS)
+		return status;
+
+	status = QLA_ERROR;
+	/* Wait until the Network Reset Intr bit is cleared */
+	max_wait_time = RESET_INTR_TOV;
+	do {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		ctrl_status = readw(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
+			break;
+
+		msleep(1000);
+	} while ((--max_wait_time));
+
+	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
+		DEBUG2(printk(KERN_WARNING
+			      "scsi%ld: Network Reset Intr not cleared by "
+			      "Network function, clearing it now!\n",
+			      ha->host_no));
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	/* Wait until the firmware tells us the Soft Reset is done */
+	max_wait_time = SOFT_RESET_TOV;
+	do {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		ctrl_status = readw(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
+			status = QLA_SUCCESS;
+			break;
+		}
+
+		msleep(1000);
+	} while ((--max_wait_time));
+
+	/*
+	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
+	 * after the soft reset has taken place.
+	 */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ctrl_status = readw(&ha->reg->ctrl_status);
+	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
+		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* If the soft reset fails, then most probably the BIOS on the
+	 * other function is also enabled.
+	 * Since initialization is sequential, the other function won't
+	 * be able to acknowledge the soft reset.
+	 * Issue a force soft reset to work around this scenario.
+	 */
+	if (max_wait_time == 0) {
+		/* Issue Force Soft Reset */
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		/* Wait until the firmware tells us the Soft Reset is done */
+		max_wait_time = SOFT_RESET_TOV;
+		do {
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			ctrl_status = readw(&ha->reg->ctrl_status);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
+				status = QLA_SUCCESS;
+				break;
+			}
+
+			msleep(1000);
+		} while ((--max_wait_time));
+	}
+
+	return status;
+}
+
+/**
+ * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
+ * @ha: Pointer to host adapter structure.
+ * @res: returned scsi status
+ *
+ * This routine is called just prior to a HARD RESET to return all
+ * outstanding commands back to the Operating System.
+ * The caller should make sure that the following locks are released
+ * before calling this routine: hardware lock and io_request_lock.
+ **/
+static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
+{
+	struct srb *srb;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (i = 0; i < ha->host->can_queue; i++) {
+		srb = qla4xxx_del_from_active_array(ha, i);
+		if (srb != NULL) {
+			srb->cmd->result = res;
+			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
+{
+	clear_bit(AF_ONLINE, &ha->flags);
+
+	/* Disable the board */
+	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
+
+	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+	qla4xxx_mark_all_devices_missing(ha);
+	clear_bit(AF_INIT_DONE, &ha->flags);
+}
+
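+/**
+ * qla4xxx_fail_session - fail a session prior to adapter recovery
+ * @cls_session: Pointer to the session to fail
+ *
+ * Flash DDB sessions are blocked (the driver relogs them in itself);
+ * other sessions are failed with ISCSI_ERR_CONN_FAILED so the upper
+ * layers recover them.
+ **/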
+static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
+
+	if (ddb_entry->ddb_type == FLASH_DDB)
+		iscsi_block_session(ddb_entry->sess);
+	else
+		iscsi_session_failure(cls_session->dd_data,
+				      ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * qla4xxx_recover_adapter - recovers adapter after a fatal error
+ * @ha: Pointer to host adapter structure.
+ **/
+static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
+{
+	int status = QLA_ERROR;
+	uint8_t reset_chip = 0;
+	uint32_t dev_state;
+	unsigned long wait;
+
+	/* Stall incoming I/O until we are done */
+	scsi_block_requests(ha->host);
+	clear_bit(AF_ONLINE, &ha->flags);
+	clear_bit(AF_LINK_UP, &ha->flags);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
+
+	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+
+	if ((is_qla8032(ha) || is_qla8042(ha)) &&
+	    !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
+		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+			   __func__);
+		/* disable pause frame for ISP83xx */
+		qla4_83xx_disable_pause(ha);
+	}
+
+	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
+
+	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+		reset_chip = 1;
+
+	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
+	 * do not reset adapter, jump to initialize_adapter */
+	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+		status = QLA_SUCCESS;
+		goto recover_ha_init_adapter;
+	}
+
+	/* For the ISP-8xxx adapter, issue a stop_firmware if invoked
+	 * from eh_host_reset or ioctl module */
+	if (is_qla80XX(ha) && !reset_chip &&
+	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+		    "scsi%ld: %s - Performing stop_firmware...\n",
+		    ha->host_no, __func__));
+		status = ha->isp_ops->reset_firmware(ha);
+		if (status == QLA_SUCCESS) {
+			ha->isp_ops->disable_intrs(ha);
+			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+		} else {
+			/* If the stop_firmware fails then
+			 * reset the entire chip */
+			reset_chip = 1;
+			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		}
+	}
+
+	/* Issue full chip reset if recovering from a catastrophic error,
+	 * or if stop_firmware fails for ISP-8xxx.
+	 * This is the default case for ISP-4xxx */
+	if (is_qla40XX(ha) || reset_chip) {
+		if (is_qla40XX(ha))
+			goto chip_reset;
+
+		/* Check if 8XXX firmware is alive or not
+		 * We may have arrived here from NEED_RESET
+		 * detection only */
+		if (test_bit(AF_FW_RECOVERY, &ha->flags))
+			goto chip_reset;
+
+		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+		while (time_before(jiffies, wait)) {
+			if (qla4_8xxx_check_fw_alive(ha)) {
+				qla4xxx_mailbox_premature_completion(ha);
+				break;
+			}
+
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ);
+		}
+chip_reset:
+		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+			qla4xxx_cmd_wait(ha);
+
+		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+		    "scsi%ld: %s - Performing chip reset..\n",
+		    ha->host_no, __func__));
+		status = ha->isp_ops->reset_chip(ha);
+		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+	}
+
+	/* Flush any pending ddb changed AENs */
+	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+
+recover_ha_init_adapter:
+	/* Upon successful firmware/chip reset, re-initialize the adapter */
+	if (status == QLA_SUCCESS) {
+		/* For ISP-4xxx, force function 1 to always initialize
+		 * before function 3 to prevent both functions from
+		 * stepping on top of the other */
+		if (is_qla40XX(ha) && (ha->mac_index == 3))
+			ssleep(6);
+
+		/* NOTE: AF_ONLINE flag set upon successful completion of
+		 * qla4xxx_initialize_adapter */
+		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+			status = qla4_8xxx_check_init_adapter_retry(ha);
+			if (status == QLA_ERROR) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
+					   ha->host_no, __func__);
+				qla4xxx_dead_adapter_cleanup(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_FW_CONTEXT,
+					  &ha->dpc_flags);
+				goto exit_recover;
+			}
+		}
+	}
+
+	/* Retry failed adapter initialization, if necessary
+	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
+	 * case to prevent ping-pong resets between functions */
+	if (!test_bit(AF_ONLINE, &ha->flags) &&
+	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+		/* Adapter initialization failed, see if we can retry
+		 * resetting the ha.
+		 * Since we don't want to block the DPC for too long
+		 * with multiple resets in the same thread,
+		 * utilize DPC to retry */
+		if (is_qla80XX(ha)) {
+			ha->isp_ops->idc_lock(ha);
+			dev_state = qla4_8xxx_rd_direct(ha,
+							QLA8XXX_CRB_DEV_STATE);
+			ha->isp_ops->idc_unlock(ha);
+			if (dev_state == QLA8XXX_DEV_FAILED) {
+				ql4_printk(KERN_INFO, ha, "%s: don't retry "
+					   "recover adapter. H/W is in Failed "
+					   "state\n", __func__);
+				qla4xxx_dead_adapter_cleanup(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_FW_CONTEXT,
+						&ha->dpc_flags);
+				status = QLA_ERROR;
+
+				goto exit_recover;
+			}
+		}
+
+		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
+			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
+			DEBUG2(printk("scsi%ld: recover adapter - retrying "
+				      "(%d) more times\n", ha->host_no,
+				      ha->retry_reset_ha_cnt));
+			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+			status = QLA_ERROR;
+		} else {
+			if (ha->retry_reset_ha_cnt > 0) {
+				/* Schedule another Reset HA--DPC will retry */
+				ha->retry_reset_ha_cnt--;
+				DEBUG2(printk("scsi%ld: recover adapter - "
+					      "retry remaining %d\n",
+					      ha->host_no,
+					      ha->retry_reset_ha_cnt));
+				status = QLA_ERROR;
+			}
+
+			if (ha->retry_reset_ha_cnt == 0) {
+				/* Recover adapter retries have been exhausted.
+				 * Adapter DEAD */
+				DEBUG2(printk("scsi%ld: recover adapter "
+					      "failed - board disabled\n",
+					      ha->host_no));
+				qla4xxx_dead_adapter_cleanup(ha);
+				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+				clear_bit(DPC_RESET_HA_FW_CONTEXT,
+					  &ha->dpc_flags);
+				status = QLA_ERROR;
+			}
+		}
+	} else {
+		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+	}
+
+exit_recover:
+	ha->adapter_error_count++;
+
+	if (test_bit(AF_ONLINE, &ha->flags))
+		ha->isp_ops->enable_intrs(ha);
+
+	scsi_unblock_requests(ha->host);
+
+	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
+	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
+
+	return status;
+}
+
+static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	if (!iscsi_is_session_online(cls_session)) {
+		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+				   " unblock session\n", ha->host_no, __func__,
+				   ddb_entry->fw_ddb_index);
+			iscsi_unblock_session(ddb_entry->sess);
+		} else {
+			/* Trigger relogin */
+			if (ddb_entry->ddb_type == FLASH_DDB) {
+				if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
+				      test_bit(DF_DISABLE_RELOGIN,
+					       &ddb_entry->flags)))
+					qla4xxx_arm_relogin_timer(ddb_entry);
+			} else
+				iscsi_session_failure(cls_session->dd_data,
+						      ISCSI_ERR_CONN_FAILED);
+		}
+	}
+}
+
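+/**
+ * qla4xxx_unblock_flash_ddb - unblock a flash DDB session
+ * @cls_session: Pointer to the session to unblock
+ *
+ * Unblocks the session and, if the adapter is online, queues a target
+ * scan for it.
+ **/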
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+		   " unblock session\n", ha->host_no, __func__,
+		   ddb_entry->fw_ddb_index);
+
+	iscsi_unblock_session(ddb_entry->sess);
+
+	/* Start scan target */
+	if (test_bit(AF_ONLINE, &ha->flags)) {
+		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+			   " start scan\n", ha->host_no, __func__,
+			   ddb_entry->fw_ddb_index);
+		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+	}
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+	int status = QLA_SUCCESS;
+
+	sess = cls_session->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+		   " unblock user space session\n", ha->host_no, __func__,
+		   ddb_entry->fw_ddb_index);
+
+	if (!iscsi_is_session_online(cls_session)) {
+		iscsi_conn_start(ddb_entry->conn);
+		iscsi_conn_login_event(ddb_entry->conn,
+				       ISCSI_CONN_STATE_LOGGED_IN);
+	} else {
+		ql4_printk(KERN_INFO, ha,
+			   "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
+			   ha->host_no, __func__, ddb_entry->fw_ddb_index,
+			   cls_session->sid);
+		status = QLA_ERROR;
+	}
+
+	return status;
+}
+
+static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
+{
+	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
+}
+
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+	uint16_t relogin_timer;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	relogin_timer = max(ddb_entry->default_relogin_timeout,
+			    (uint16_t)RELOGIN_TOV);
+	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+			  ddb_entry->fw_ddb_index, relogin_timer));
+
+	qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry;
+	struct scsi_qla_host *ha;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (ddb_entry->ddb_type != FLASH_DDB)
+		return;
+
+	if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+		return;
+
+	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+	    !iscsi_is_session_online(cls_sess)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "relogin issued\n"));
+		qla4xxx_relogin_flash_ddb(cls_sess);
+	}
+}
+
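+/**
+ * qla4xxx_wake_dpc - schedule the DPC work on the adapter's workqueue
+ * @ha: Pointer to host adapter structure.
+ **/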
+void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
+{
+	if (ha->dpc_thread)
+		queue_work(ha->dpc_thread, &ha->dpc_work);
+}
+
+static struct qla4_work_evt *
+qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
+		   enum qla4_work_type type)
+{
+	struct qla4_work_evt *e;
+	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
+
+	e = kzalloc(size, GFP_ATOMIC);
+	if (!e)
+		return NULL;
+
+	INIT_LIST_HEAD(&e->list);
+	e->type = type;
+	return e;
+}
+
+static void qla4xxx_post_work(struct scsi_qla_host *ha,
+			     struct qla4_work_evt *e)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->work_lock, flags);
+	list_add_tail(&e->list, &ha->work_list);
+	spin_unlock_irqrestore(&ha->work_lock, flags);
+	qla4xxx_wake_dpc(ha);
+}
+
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+			  enum iscsi_host_event_code aen_code,
+			  uint32_t data_size, uint8_t *data)
+{
+	struct qla4_work_evt *e;
+
+	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
+	if (!e)
+		return QLA_ERROR;
+
+	e->u.aen.code = aen_code;
+	e->u.aen.data_size = data_size;
+	memcpy(e->u.aen.data, data, data_size);
+
+	qla4xxx_post_work(ha, e);
+
+	return QLA_SUCCESS;
+}
+
+int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
+			       uint32_t status, uint32_t pid,
+			       uint32_t data_size, uint8_t *data)
+{
+	struct qla4_work_evt *e;
+
+	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
+	if (!e)
+		return QLA_ERROR;
+
+	e->u.ping.status = status;
+	e->u.ping.pid = pid;
+	e->u.ping.data_size = data_size;
+	memcpy(e->u.ping.data, data, data_size);
+
+	qla4xxx_post_work(ha, e);
+
+	return QLA_SUCCESS;
+}
+
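+/**
+ * qla4xxx_do_work - drain the deferred work list
+ * @ha: Pointer to host adapter structure.
+ *
+ * Splices the work list under work_lock, then posts each AEN or ping
+ * completion event to user space via the iSCSI transport.
+ **/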
+static void qla4xxx_do_work(struct scsi_qla_host *ha)
+{
+	struct qla4_work_evt *e, *tmp;
+	unsigned long flags;
+	LIST_HEAD(work);
+
+	spin_lock_irqsave(&ha->work_lock, flags);
+	list_splice_init(&ha->work_list, &work);
+	spin_unlock_irqrestore(&ha->work_lock, flags);
+
+	list_for_each_entry_safe(e, tmp, &work, list) {
+		list_del_init(&e->list);
+
+		switch (e->type) {
+		case QLA4_EVENT_AEN:
+			iscsi_post_host_event(ha->host_no,
+					      &qla4xxx_iscsi_transport,
+					      e->u.aen.code,
+					      e->u.aen.data_size,
+					      e->u.aen.data);
+			break;
+		case QLA4_EVENT_PING_STATUS:
+			iscsi_ping_comp_event(ha->host_no,
+					      &qla4xxx_iscsi_transport,
+					      e->u.ping.status,
+					      e->u.ping.pid,
+					      e->u.ping.data_size,
+					      e->u.ping.data);
+			break;
+		default:
+			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
+				   "supported", e->type);
+		}
+		kfree(e);
+	}
+}
+
+/**
+ * qla4xxx_do_dpc - dpc routine
+ * @data: in our case pointer to adapter structure
+ *
+ * This routine is a task that is scheduled by the interrupt handler
+ * to perform the background processing for interrupts.  We put it
+ * on a work queue that is consumed whenever the scheduler runs, so
+ * it may do anything, e.g. put the process to sleep.  In fact, the
+ * mid-level tries to sleep when it reaches the driver threshold
+ * "host->can_queue", which can cause a panic if we were still in
+ * interrupt context.
+ **/
+static void qla4xxx_do_dpc(struct work_struct *work)
+{
+	struct scsi_qla_host *ha =
+		container_of(work, struct scsi_qla_host, dpc_work);
+	int status = QLA_ERROR;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+			  ha->host_no, __func__, ha->flags, ha->dpc_flags));
+
+	/* Initialization not yet finished. Don't do anything yet. */
+	if (!test_bit(AF_INIT_DONE, &ha->flags))
+		return;
+
+	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
+		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
+		    ha->host_no, __func__, ha->flags));
+		return;
+	}
+
+	/* post events to application */
+	qla4xxx_do_work(ha);
+
+	if (is_qla80XX(ha)) {
+		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
+			if (is_qla8032(ha) || is_qla8042(ha)) {
+				ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
+					   __func__);
+				/* disable pause frame for ISP83xx */
+				qla4_83xx_disable_pause(ha);
+			}
+
+			ha->isp_ops->idc_lock(ha);
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_FAILED);
+			ha->isp_ops->idc_unlock(ha);
+			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
+			qla4_8xxx_device_state_handler(ha);
+		}
+
+		if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
+			if (is_qla8042(ha)) {
+				if (ha->idc_info.info2 &
+				    ENABLE_INTERNAL_LOOPBACK) {
+					ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
+						   __func__);
+					status = qla4_84xx_config_acb(ha,
+							    ACB_CONFIG_DISABLE);
+					if (status != QLA_SUCCESS) {
+						ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
+							   __func__);
+					}
+				}
+			}
+			qla4_83xx_post_idc_ack(ha);
+			clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
+		}
+
+		if (is_qla8042(ha) &&
+		    test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
+			ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
+				   __func__);
+			if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
+			    QLA_SUCCESS) {
+				ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
+					   __func__);
+			}
+			clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
+		}
+
+		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
+			qla4_8xxx_need_qsnt_handler(ha);
+		}
+	}
+
+	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
+	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
+		if ((is_qla8022(ha) && ql4xdontresethba) ||
+		    ((is_qla8032(ha) || is_qla8042(ha)) &&
+		     qla4_83xx_idc_dontreset(ha))) {
+			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
+			    ha->host_no, __func__));
+			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+			goto dpc_post_reset_ha;
+		}
+		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
+			qla4xxx_recover_adapter(ha);
+
+		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
+			uint8_t wait_time = RESET_INTR_TOV;
+
+			while ((readw(&ha->reg->ctrl_status) &
+				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
+				if (--wait_time == 0)
+					break;
+				msleep(1000);
+			}
+			if (wait_time == 0)
+				DEBUG2(printk("scsi%ld: %s: SR|FSR "
+					      "bit not cleared-- resetting\n",
+					      ha->host_no, __func__));
+			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
+				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+				status = qla4xxx_recover_adapter(ha);
+			}
+			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
+			if (status == QLA_SUCCESS)
+				ha->isp_ops->enable_intrs(ha);
+		}
+	}
+
+dpc_post_reset_ha:
+	/* ---- process AEN? --- */
+	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
+		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
+
+	/* ---- Get DHCP IP Address? --- */
+	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
+		qla4xxx_get_dhcp_ip_address(ha);
+
+	/* ---- relogin device? --- */
+	if (adapter_up(ha) &&
+	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+	}
+
+	/* ---- link change? --- */
+	if (!test_bit(AF_LOOPBACK, &ha->flags) &&
+	    test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+		if (!test_bit(AF_LINK_UP, &ha->flags)) {
+			/* ---- link down? --- */
+			qla4xxx_mark_all_devices_missing(ha);
+		} else {
+			/* ---- link up? --- *
+			 * F/W will auto login to all devices ONLY ONCE after
+			 * link up during driver initialization and runtime
+			 * fatal error recovery.  Therefore, the driver must
+			 * manually relogin to devices when recovering from
+			 * connection failures, logouts, expired KATO, etc. */
+			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+				qla4xxx_build_ddb_list(ha, ha->is_reset);
+				iscsi_host_for_each_session(ha->host,
+						qla4xxx_login_flash_ddb);
+			} else
+				qla4xxx_relogin_all_devices(ha);
+		}
+	}
+	if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
+		if (qla4xxx_sysfs_ddb_export(ha))
+			ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
+				   __func__);
+	}
+}
+
+/**
+ * qla4xxx_free_adapter - release the adapter
+ * @ha: pointer to adapter structure
+ **/
+static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
+{
+	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+
+	/* Turn-off interrupts on the card. */
+	ha->isp_ops->disable_intrs(ha);
+
+	if (is_qla40XX(ha)) {
+		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
+		       &ha->reg->ctrl_status);
+		readl(&ha->reg->ctrl_status);
+	} else if (is_qla8022(ha)) {
+		writel(0, &ha->qla4_82xx_reg->host_int);
+		readl(&ha->qla4_82xx_reg->host_int);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		writel(0, &ha->qla4_83xx_reg->risc_intr);
+		readl(&ha->qla4_83xx_reg->risc_intr);
+	}
+
+	/* Remove timer thread, if present */
+	if (ha->timer_active)
+		qla4xxx_stop_timer(ha);
+
+	/* Kill the kernel thread for this host */
+	if (ha->dpc_thread)
+		destroy_workqueue(ha->dpc_thread);
+
+	/* Kill the kernel thread for this host */
+	if (ha->task_wq)
+		destroy_workqueue(ha->task_wq);
+
+	/* Put firmware in known state */
+	ha->isp_ops->reset_firmware(ha);
+
+	if (is_qla80XX(ha)) {
+		ha->isp_ops->idc_lock(ha);
+		qla4_8xxx_clear_drv_active(ha);
+		ha->isp_ops->idc_unlock(ha);
+	}
+
+	/* Detach interrupts */
+	qla4xxx_free_irqs(ha);
+
+	/* free extra memory */
+	qla4xxx_mem_free(ha);
+}
+
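+/**
+ * qla4_8xxx_iospace_config - map ISP8xxx registers
+ * @ha: Pointer to host adapter structure.
+ *
+ * Reserves the PCI regions and ioremaps BAR 0.  For ISP82xx the
+ * per-function register block lives at offset 0xbc000 + (devfn << 11)
+ * within that mapping; for ISP83xx/84xx it starts at the base.
+ **/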
+int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
+{
+	int status = 0;
+	unsigned long mem_base, mem_len, db_base, db_len;
+	struct pci_dev *pdev = ha->pdev;
+
+	status = pci_request_regions(pdev, DRIVER_NAME);
+	if (status) {
+		printk(KERN_WARNING
+		    "scsi(%ld) Failed to reserve PIO regions (%s) "
+		    "status=%d\n", ha->host_no, pci_name(pdev), status);
+		goto iospace_error_exit;
+	}
+
+	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
+	    __func__, pdev->revision));
+	ha->revision_id = pdev->revision;
+
+	/* remap phys address */
+	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+	mem_len = pci_resource_len(pdev, 0);
+	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
+	    __func__, mem_base, mem_len));
+
+	/* mapping of pcibase pointer */
+	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
+	if (!ha->nx_pcibase) {
+		printk(KERN_ERR
+		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
+		pci_release_regions(ha->pdev);
+		goto iospace_error_exit;
+	}
+
+	/* Mapping of IO base pointer, door bell read and write pointer */
+
+	/* mapping of IO base pointer */
+	if (is_qla8022(ha)) {
+		ha->qla4_82xx_reg = (struct device_reg_82xx  __iomem *)
+				    ((uint8_t *)ha->nx_pcibase + 0xbc000 +
+				     (ha->pdev->devfn << 11));
+		ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+				    QLA82XX_CAM_RAM_DB2);
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
+				    ((uint8_t *)ha->nx_pcibase);
+	}
+
+	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
+	db_len = pci_resource_len(pdev, 4);
+
+	return 0;
+iospace_error_exit:
+	return -ENOMEM;
+}
+
+/**
+ * qla4xxx_iospace_config - maps registers
+ * @ha: pointer to adapter structure
+ *
+ * This routine maps the HBA's registers from PCI address space
+ * into the kernel virtual address space for memory-mapped I/O.
+ **/
+int qla4xxx_iospace_config(struct scsi_qla_host *ha)
+{
+	unsigned long pio, pio_len, pio_flags;
+	unsigned long mmio, mmio_len, mmio_flags;
+
+	pio = pci_resource_start(ha->pdev, 0);
+	pio_len = pci_resource_len(ha->pdev, 0);
+	pio_flags = pci_resource_flags(ha->pdev, 0);
+	if (pio_flags & IORESOURCE_IO) {
+		if (pio_len < MIN_IOBASE_LEN) {
+			ql4_printk(KERN_WARNING, ha,
+				"Invalid PCI I/O region size\n");
+			pio = 0;
+		}
+	} else {
+		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
+		pio = 0;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	mmio = pci_resource_start(ha->pdev, 1);
+	mmio_len = pci_resource_len(ha->pdev, 1);
+	mmio_flags = pci_resource_flags(ha->pdev, 1);
+
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		ql4_printk(KERN_ERR, ha,
+		    "region #0 not an MMIO resource, aborting\n");
+
+		goto iospace_error_exit;
+	}
+
+	if (mmio_len < MIN_IOBASE_LEN) {
+		ql4_printk(KERN_ERR, ha,
+		    "Invalid PCI mem region size, aborting\n");
+		goto iospace_error_exit;
+	}
+
+	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
+		ql4_printk(KERN_WARNING, ha,
+		    "Failed to reserve PIO/MMIO regions\n");
+
+		goto iospace_error_exit;
+	}
+
+	ha->pio_address = pio;
+	ha->pio_length = pio_len;
+	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
+	if (!ha->reg) {
+		ql4_printk(KERN_ERR, ha,
+		    "cannot remap MMIO, aborting\n");
+
+		goto iospace_error_exit;
+	}
+
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
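+/* Per-chip dispatch tables: the ISP4xxx, ISP82xx and ISP83xx families
+ * each provide their own register-access, reset, IOCB and mailbox
+ * methods. */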
+static struct isp_operations qla4xxx_isp_ops = {
+	.iospace_config         = qla4xxx_iospace_config,
+	.pci_config             = qla4xxx_pci_config,
+	.disable_intrs          = qla4xxx_disable_intrs,
+	.enable_intrs           = qla4xxx_enable_intrs,
+	.start_firmware         = qla4xxx_start_firmware,
+	.intr_handler           = qla4xxx_intr_handler,
+	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
+	.reset_chip             = qla4xxx_soft_reset,
+	.reset_firmware         = qla4xxx_hw_reset,
+	.queue_iocb             = qla4xxx_queue_iocb,
+	.complete_iocb          = qla4xxx_complete_iocb,
+	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
+	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
+	.get_sys_info           = qla4xxx_get_sys_info,
+	.queue_mailbox_command	= qla4xxx_queue_mbox_cmd,
+	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
+};
+
+static struct isp_operations qla4_82xx_isp_ops = {
+	.iospace_config         = qla4_8xxx_iospace_config,
+	.pci_config             = qla4_8xxx_pci_config,
+	.disable_intrs          = qla4_82xx_disable_intrs,
+	.enable_intrs           = qla4_82xx_enable_intrs,
+	.start_firmware         = qla4_8xxx_load_risc,
+	.restart_firmware	= qla4_82xx_try_start_fw,
+	.intr_handler           = qla4_82xx_intr_handler,
+	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
+	.need_reset		= qla4_8xxx_need_reset,
+	.reset_chip             = qla4_82xx_isp_reset,
+	.reset_firmware         = qla4_8xxx_stop_firmware,
+	.queue_iocb             = qla4_82xx_queue_iocb,
+	.complete_iocb          = qla4_82xx_complete_iocb,
+	.rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
+	.rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
+	.get_sys_info           = qla4_8xxx_get_sys_info,
+	.rd_reg_direct		= qla4_82xx_rd_32,
+	.wr_reg_direct		= qla4_82xx_wr_32,
+	.rd_reg_indirect	= qla4_82xx_md_rd_32,
+	.wr_reg_indirect	= qla4_82xx_md_wr_32,
+	.idc_lock		= qla4_82xx_idc_lock,
+	.idc_unlock		= qla4_82xx_idc_unlock,
+	.rom_lock_recovery	= qla4_82xx_rom_lock_recovery,
+	.queue_mailbox_command	= qla4_82xx_queue_mbox_cmd,
+	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
+};
+
+static struct isp_operations qla4_83xx_isp_ops = {
+	.iospace_config		= qla4_8xxx_iospace_config,
+	.pci_config		= qla4_8xxx_pci_config,
+	.disable_intrs		= qla4_83xx_disable_intrs,
+	.enable_intrs		= qla4_83xx_enable_intrs,
+	.start_firmware		= qla4_8xxx_load_risc,
+	.restart_firmware	= qla4_83xx_start_firmware,
+	.intr_handler		= qla4_83xx_intr_handler,
+	.interrupt_service_routine = qla4_83xx_interrupt_service_routine,
+	.need_reset		= qla4_8xxx_need_reset,
+	.reset_chip		= qla4_83xx_isp_reset,
+	.reset_firmware		= qla4_8xxx_stop_firmware,
+	.queue_iocb		= qla4_83xx_queue_iocb,
+	.complete_iocb		= qla4_83xx_complete_iocb,
+	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
+	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
+	.get_sys_info		= qla4_8xxx_get_sys_info,
+	.rd_reg_direct		= qla4_83xx_rd_reg,
+	.wr_reg_direct		= qla4_83xx_wr_reg,
+	.rd_reg_indirect	= qla4_83xx_rd_reg_indirect,
+	.wr_reg_indirect	= qla4_83xx_wr_reg_indirect,
+	.idc_lock		= qla4_83xx_drv_lock,
+	.idc_unlock		= qla4_83xx_drv_unlock,
+	.rom_lock_recovery	= qla4_83xx_rom_lock_recovery,
+	.queue_mailbox_command	= qla4_83xx_queue_mbox_cmd,
+	.process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
+};
+
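+/* Shadow register accessors: ISP4xxx reads the DMA'ed shadow_regs
+ * area, while ISP82xx reads the queue pointers directly from the
+ * device registers. */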
+uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+{
+	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
+}
+
+uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
+{
+	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
+}
+
+uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+{
+	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
+}
+
+uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
+{
+	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
+}
+
+static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
+{
+	struct scsi_qla_host *ha = data;
+	char *str = buf;
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_ETH_FLAGS:
+		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+		break;
+	case ISCSI_BOOT_ETH_INDEX:
+		rc = sprintf(str, "0\n");
+		break;
+	case ISCSI_BOOT_ETH_MAC:
+		rc = sysfs_format_mac(str, ha->my_mac,
+				      MAC_ADDR_LEN);
+		break;
+	default:
+		rc = -ENOSYS;
+		break;
+	}
+	return rc;
+}
+
+static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+{
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_ETH_FLAGS:
+	case ISCSI_BOOT_ETH_MAC:
+	case ISCSI_BOOT_ETH_INDEX:
+		rc = S_IRUGO;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
+{
+	struct scsi_qla_host *ha = data;
+	char *str = buf;
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		rc = sprintf(str, "%s\n", ha->name_string);
+		break;
+	default:
+		rc = -ENOSYS;
+		break;
+	}
+	return rc;
+}
+
+static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+{
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		rc = S_IRUGO;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static ssize_t
+qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
+			   char *buf)
+{
+	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
+	char *str = buf;
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_NAME:
+		rc = sprintf(str, "%s\n", (char *)&boot_sess->target_name);
+		break;
+	case ISCSI_BOOT_TGT_IP_ADDR:
+		if (boot_conn->dest_ipaddr.ip_type == 0x1)
+			rc = sprintf(str, "%pI4\n",
+				     &boot_conn->dest_ipaddr.ip_address);
+		else
+			rc = sprintf(str, "%pI6\n",
+				     &boot_conn->dest_ipaddr.ip_address);
+		break;
+	case ISCSI_BOOT_TGT_PORT:
+		rc = sprintf(str, "%d\n", boot_conn->dest_port);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+		rc = sprintf(str, "%.*s\n",
+			     boot_conn->chap.target_chap_name_length,
+			     (char *)&boot_conn->chap.target_chap_name);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+		rc = sprintf(str, "%.*s\n",
+			     boot_conn->chap.target_secret_length,
+			     (char *)&boot_conn->chap.target_secret);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+		rc = sprintf(str, "%.*s\n",
+			     boot_conn->chap.intr_chap_name_length,
+			     (char *)&boot_conn->chap.intr_chap_name);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+		rc = sprintf(str, "%.*s\n",
+			     boot_conn->chap.intr_secret_length,
+			     (char *)&boot_conn->chap.intr_secret);
+		break;
+	case ISCSI_BOOT_TGT_FLAGS:
+		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+		break;
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+		rc = sprintf(str, "0\n");
+		break;
+	default:
+		rc = -ENOSYS;
+		break;
+	}
+	return rc;
+}
+
+static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
+{
+	struct scsi_qla_host *ha = data;
+	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
+
+	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
+}
+
+static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
+{
+	struct scsi_qla_host *ha = data;
+	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
+
+	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
+}
+
+static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+{
+	int rc;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_NAME:
+	case ISCSI_BOOT_TGT_IP_ADDR:
+	case ISCSI_BOOT_TGT_PORT:
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+	case ISCSI_BOOT_TGT_FLAGS:
+		rc = S_IRUGO;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static void qla4xxx_boot_release(void *data)
+{
+	struct scsi_qla_host *ha = data;
+
+	scsi_host_put(ha->host);
+}
+
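+/**
+ * get_fw_boot_info - fetch the primary/secondary boot target indexes
+ * @ha: Pointer to host adapter structure.
+ * @ddb_index: returns the primary [0] and secondary [1] DDB indexes
+ *
+ * On ISP4xxx the boot parameters live in NVRAM; on ISP8xxx they are
+ * read from the iSCSI parameter region of flash.  An index is valid
+ * only when BIT_7 of the corresponding byte is set.
+ **/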
+static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
+{
+	dma_addr_t buf_dma;
+	uint32_t addr, pri_addr, sec_addr;
+	uint32_t offset;
+	uint16_t func_num;
+	uint8_t val;
+	uint8_t *buf = NULL;
+	size_t size = 13 * sizeof(uint8_t);
+	int ret = QLA_SUCCESS;
+
+	func_num = PCI_FUNC(ha->pdev->devfn);
+
+	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
+		   __func__, ha->pdev->device, func_num);
+
+	if (is_qla40XX(ha)) {
+		if (func_num == 1) {
+			addr = NVRAM_PORT0_BOOT_MODE;
+			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
+			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
+		} else if (func_num == 3) {
+			addr = NVRAM_PORT1_BOOT_MODE;
+			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
+			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
+		} else {
+			ret = QLA_ERROR;
+			goto exit_boot_info;
+		}
+
+		/* Check Boot Mode */
+		val = rd_nvram_byte(ha, addr);
+		if (!(val & 0x07)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
+					  "options : 0x%x\n", __func__, val));
+			ret = QLA_ERROR;
+			goto exit_boot_info;
+		}
+
+		/* get primary valid target index */
+		val = rd_nvram_byte(ha, pri_addr);
+		if (val & BIT_7)
+			ddb_index[0] = (val & 0x7f);
+
+		/* get secondary valid target index */
+		val = rd_nvram_byte(ha, sec_addr);
+		if (val & BIT_7)
+			ddb_index[1] = (val & 0x7f);
+		goto exit_boot_info;
+	} else if (is_qla80XX(ha)) {
+		buf = dma_alloc_coherent(&ha->pdev->dev, size,
+					 &buf_dma, GFP_KERNEL);
+		if (!buf) {
+			DEBUG2(ql4_printk(KERN_ERR, ha,
+					  "%s: Unable to allocate dma buffer\n",
+					   __func__));
+			ret = QLA_ERROR;
+			goto exit_boot_info;
+		}
+
+		if (ha->port_num == 0)
+			offset = BOOT_PARAM_OFFSET_PORT0;
+		else if (ha->port_num == 1)
+			offset = BOOT_PARAM_OFFSET_PORT1;
+		else {
+			ret = QLA_ERROR;
+			goto exit_boot_info_free;
+		}
+		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
+		       offset;
+		if (qla4xxx_get_flash(ha, buf_dma, addr, size) != QLA_SUCCESS) {
+			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
+					  " failed\n", ha->host_no, __func__));
+			ret = QLA_ERROR;
+			goto exit_boot_info_free;
+		}
+		/* Check Boot Mode */
+		if (!(buf[1] & 0x07)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
+					  " : 0x%x\n", buf[1]));
+			ret = QLA_ERROR;
+			goto exit_boot_info_free;
+		}
+
+		/* get primary valid target index */
+		if (buf[2] & BIT_7)
+			ddb_index[0] = buf[2] & 0x7f;
+
+		/* get secondary valid target index */
+		if (buf[11] & BIT_7)
+			ddb_index[1] = buf[11] & 0x7f;
+	} else {
+		ret = QLA_ERROR;
+		goto exit_boot_info;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
+			  " target ID %d\n", __func__, ddb_index[0],
+			  ddb_index[1]));
+
+exit_boot_info_free:
+	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
+exit_boot_info:
+	ha->pri_ddb_idx = ddb_index[0];
+	ha->sec_ddb_idx = ddb_index[1];
+	return ret;
+}
+
+/**
+ * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to be returned
+ * @password: CHAP password to be returned
+ *
+ * If a boot entry has BIDI CHAP enabled, then we need to set the BIDI
+ * CHAP user and password in the sysfs entry under
+ * /sys/firmware/iscsi_boot#/.  Find the first BIDI CHAP entry in the
+ * CHAP cache and copy it to the boot record in sysfs.
+ **/
+static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
+			    char *password)
+{
+	int i, ret = -EINVAL;
+	int max_chap_entries = 0;
+	struct ql4_chap_table *chap_table;
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+						sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+		return ret;
+	}
+
+	mutex_lock(&ha->chap_sem);
+	for (i = 0; i < max_chap_entries; i++) {
+		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+		if (chap_table->cookie !=
+		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+			continue;
+		}
+
+		if (chap_table->flags & BIT_7) /* local */
+			continue;
+
+		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
+			continue;
+
+		strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+		strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+		ret = 0;
+		break;
+	}
+	mutex_unlock(&ha->chap_sem);
+
+	return ret;
+}
+
+static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
+				   struct ql4_boot_session_info *boot_sess,
+				   uint16_t ddb_index)
+{
+	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_entry_dma;
+	uint16_t idx;
+	uint16_t options;
+	int ret = QLA_SUCCESS;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer.\n",
+				  __func__));
+		ret = QLA_ERROR;
+		return ret;
+	}
+
+	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
+				   fw_ddb_entry_dma, ddb_index)) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
+				  "index [%d]\n", __func__, ddb_index));
+		ret = QLA_ERROR;
+		goto exit_boot_target;
+	}
+
+	/* Update target name and IP from DDB */
+	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
+	       min(sizeof(boot_sess->target_name),
+		   sizeof(fw_ddb_entry->iscsi_name)));
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE) {
+		memcpy(&boot_conn->dest_ipaddr.ip_address,
+		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
+	} else {
+		boot_conn->dest_ipaddr.ip_type = 0x1;
+		memcpy(&boot_conn->dest_ipaddr.ip_address,
+		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
+	}
+
+	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
+
+	/* update chap information */
+	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+
+	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
+
+		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
+
+		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
+				       target_chap_name,
+				       (char *)&boot_conn->chap.target_secret,
+				       idx);
+		if (ret) {
+			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
+			ret = QLA_ERROR;
+			goto exit_boot_target;
+		}
+
+		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
+		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
+	}
+
+	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
+
+		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
+
+		ret = qla4xxx_get_bidi_chap(ha,
+				    (char *)&boot_conn->chap.intr_chap_name,
+				    (char *)&boot_conn->chap.intr_secret);
+
+		if (ret) {
+			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
+			ret = QLA_ERROR;
+			goto exit_boot_target;
+		}
+
+		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
+		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
+	}
+
+exit_boot_target:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+			  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret;
+}
+
+static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
+{
+	uint16_t ddb_index[2];
+	int ret = QLA_ERROR;
+	int rval;
+
+	memset(ddb_index, 0, sizeof(ddb_index));
+	ddb_index[0] = 0xffff;
+	ddb_index[1] = 0xffff;
+	ret = get_fw_boot_info(ha, ddb_index);
+	if (ret != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				"%s: No boot target configured.\n", __func__));
+		return ret;
+	}
+
+	if (ql4xdisablesysfsboot)
+		return QLA_SUCCESS;
+
+	if (ddb_index[0] == 0xffff)
+		goto sec_target;
+
+	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
+				      ddb_index[0]);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
+				  "configured\n", __func__));
+	} else
+		ret = QLA_SUCCESS;
+
+sec_target:
+	if (ddb_index[1] == 0xffff)
+		goto exit_get_boot_info;
+
+	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
+				      ddb_index[1]);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
+				  " configured\n", __func__));
+	} else
+		ret = QLA_SUCCESS;
+
+exit_get_boot_info:
+	return ret;
+}
+
+static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
+{
+	struct iscsi_boot_kobj *boot_kobj;
+
+	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
+		return QLA_ERROR;
+
+	if (ql4xdisablesysfsboot) {
+		ql4_printk(KERN_INFO, ha,
+			   "%s: sysfsboot disabled - driver will trigger login "
+			   "and publish session for discovery.\n", __func__);
+		return QLA_SUCCESS;
+	}
+
+	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
+	if (!ha->boot_kset)
+		goto kset_free;
+
+	if (!scsi_host_get(ha->host))
+		goto kset_free;
+	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
+					     qla4xxx_show_boot_tgt_pri_info,
+					     qla4xxx_tgt_get_attr_visibility,
+					     qla4xxx_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	if (!scsi_host_get(ha->host))
+		goto kset_free;
+	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
+					     qla4xxx_show_boot_tgt_sec_info,
+					     qla4xxx_tgt_get_attr_visibility,
+					     qla4xxx_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	if (!scsi_host_get(ha->host))
+		goto kset_free;
+	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
+					       qla4xxx_show_boot_ini_info,
+					       qla4xxx_ini_get_attr_visibility,
+					       qla4xxx_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	if (!scsi_host_get(ha->host))
+		goto kset_free;
+	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
+					       qla4xxx_show_boot_eth_info,
+					       qla4xxx_eth_get_attr_visibility,
+					       qla4xxx_boot_release);
+	if (!boot_kobj)
+		goto put_host;
+
+	return QLA_SUCCESS;
+
+put_host:
+	scsi_host_put(ha->host);
+kset_free:
+	iscsi_boot_destroy_kset(ha->boot_kset);
+	return -ENOMEM;
+}
+
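+/*
+ * Extract the identifying tuple (tpgt, port, IQN, IP address) from a live
+ * session's DDB so it can be compared against other DDBs.
+ */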
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+				  struct ql4_tuple_ddb *tddb)
+{
+	struct scsi_qla_host *ha;
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+
+	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+	ha = ddb_entry->ha;
+	cls_sess = ddb_entry->sess;
+	sess = cls_sess->dd_data;
+	cls_conn = ddb_entry->conn;
+	conn = cls_conn->dd_data;
+
+	tddb->tpgt = sess->tpgt;
+	tddb->port = conn->persistent_port;
+	strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+	strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+				      struct ql4_tuple_ddb *tddb,
+				      uint8_t *flash_isid)
+{
+	uint16_t options = 0;
+
+	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE)
+		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+	else
+		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+	tddb->port = le16_to_cpu(fw_ddb_entry->port);
+
+	if (flash_isid == NULL)
+		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
+		       sizeof(tddb->isid));
+	else
+		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+				     struct ql4_tuple_ddb *old_tddb,
+				     struct ql4_tuple_ddb *new_tddb,
+				     uint8_t is_isid_compare)
+{
+	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+		return QLA_ERROR;
+
+	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+		return QLA_ERROR;
+
+	if (old_tddb->port != new_tddb->port)
+		return QLA_ERROR;
+
+	/* For multi-session targets the driver generates the ISID, so do
+	 * not compare ISIDs in the reset path: that would compare a
+	 * driver-generated ISID against a firmware-generated one, and the
+	 * mismatch could add duplicate DDBs to the list.
+	 */
+	if (is_isid_compare) {
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+			"%s: old ISID [%pmR] New ISID [%pmR]\n",
+			__func__, old_tddb->isid, new_tddb->isid));
+
+		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+			   sizeof(old_tddb->isid)))
+			return QLA_ERROR;
+	}
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+			  new_tddb->ip_addr, new_tddb->iscsi_name));
+
+	return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+				     struct dev_db_entry *fw_ddb_entry,
+				     uint32_t *index)
+{
+	struct ddb_entry *ddb_entry;
+	struct ql4_tuple_ddb *fw_tddb = NULL;
+	struct ql4_tuple_ddb *tmp_tddb = NULL;
+	int idx;
+	int ret = QLA_ERROR;
+
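+	/*
+	 * On allocation failure report QLA_SUCCESS ("session exists") so
+	 * the caller does not add a possibly duplicate session.
+	 */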
+	fw_tddb = vzalloc(sizeof(*fw_tddb));
+	if (!fw_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+	if (!tmp_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
+
+	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
+			ret = QLA_SUCCESS; /* found */
+			if (index != NULL)
+				*index = idx;
+			goto exit_check;
+		}
+	}
+
+exit_check:
+	if (fw_tddb)
+		vfree(fw_tddb);
+	if (tmp_tddb)
+		vfree(tmp_tddb);
+	return ret;
+}
+
+/**
+ * qla4xxx_check_existing_isid - check if a target with the same isid
+ *				 exists in the target list
+ * @list_nt: list of targets
+ * @isid: isid to check
+ *
+ * This routine returns QLA_SUCCESS if a target with the same isid exists.
+ **/
+static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
+{
+	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+	struct dev_db_entry *fw_ddb_entry;
+
+	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
+
+		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
+			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
+			return QLA_SUCCESS;
+		}
+	}
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_update_isid - compare ddbs and update the isid
+ * @ha: Pointer to host adapter structure.
+ * @list_nt: list of nt targets
+ * @fw_ddb_entry: firmware ddb entry
+ *
+ * This routine updates the isid if ddbs have the same iqn and isid but
+ * different IP addresses.
+ * Returns QLA_SUCCESS if the isid was updated.
+ **/
+static int qla4xxx_update_isid(struct scsi_qla_host *ha,
+			       struct list_head *list_nt,
+			       struct dev_db_entry *fw_ddb_entry)
+{
+	uint8_t base_value, i;
+
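+	/*
+	 * The low 5 bits of isid[1] stay fixed; cycle through the 8 possible
+	 * values of the top 3 bits until one not already in list_nt is found.
+	 */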
+	base_value = fw_ddb_entry->isid[1] & 0x1f;
+	for (i = 0; i < 8; i++) {
+		fw_ddb_entry->isid[1] = (base_value | (i << 5));
+		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
+			break;
+	}
+
+	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
+		return QLA_ERROR;
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_should_update_isid - check if the isid needs to be updated
+ * @ha: Pointer to host adapter structure.
+ * @old_tddb: ddb tuple
+ * @new_tddb: ddb tuple
+ *
+ * Returns QLA_SUCCESS if the IP address or port differs while the iqn and
+ * the isid are the same.
+ **/
+static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
+				      struct ql4_tuple_ddb *old_tddb,
+				      struct ql4_tuple_ddb *new_tddb)
+{
+	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
+		/* Same ip */
+		if (old_tddb->port == new_tddb->port)
+			return QLA_ERROR;
+	}
+
+	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+		/* different iqn */
+		return QLA_ERROR;
+
+	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+		   sizeof(old_tddb->isid)))
+		/* different isid */
+		return QLA_ERROR;
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
+ * @ha: Pointer to host adapter structure.
+ * @list_nt: list of nt targets.
+ * @fw_ddb_entry: firmware ddb entry.
+ *
+ * This routine checks if fw_ddb_entry already exists in list_nt, to avoid
+ * adding a duplicate ddb to list_nt.
+ * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
+ * Note: This function also updates the isid of the DDB if required.
+ **/
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+				       struct list_head *list_nt,
+				       struct dev_db_entry *fw_ddb_entry)
+{
+	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+	struct ql4_tuple_ddb *fw_tddb = NULL;
+	struct ql4_tuple_ddb *tmp_tddb = NULL;
+	int rval, ret = QLA_ERROR;
+
+	fw_tddb = vzalloc(sizeof(*fw_tddb));
+	if (!fw_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+	if (!tmp_tddb) {
+		DEBUG2(ql4_printk(KERN_WARNING, ha,
+				  "Memory Allocation failed.\n"));
+		ret = QLA_SUCCESS;
+		goto exit_check;
+	}
+
+	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
+
+	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
+					  nt_ddb_idx->flash_isid);
+		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
+		/* found duplicate ddb */
+		if (ret == QLA_SUCCESS)
+			goto exit_check;
+	}
+
+	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
+
+		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
+		if (ret == QLA_SUCCESS) {
+			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
+			if (rval == QLA_SUCCESS)
+				ret = QLA_ERROR;
+			else
+				ret = QLA_SUCCESS;
+
+			goto exit_check;
+		}
+	}
+
+exit_check:
+	if (fw_tddb)
+		vfree(fw_tddb);
+	if (tmp_tddb)
+		vfree(tmp_tddb);
+	return ret;
+}
+
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
+{
+	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+		list_del_init(&ddb_idx->list);
+		vfree(ddb_idx);
+	}
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+					struct dev_db_entry *fw_ddb_entry)
+{
+	struct iscsi_endpoint *ep;
+	struct sockaddr_in *addr;
+	struct sockaddr_in6 *addr6;
+	struct sockaddr *t_addr;
+	struct sockaddr_storage *dst_addr;
+	char *ip;
+
+	/* TODO: the iscsi_endpoint created here needs to be destroyed on unload */
+	dst_addr = vmalloc(sizeof(*dst_addr));
+	if (!dst_addr)
+		return NULL;
+
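+	/*
+	 * Build a sockaddr from the DDB's IP address and port, then open an
+	 * iSCSI transport endpoint to it.
+	 */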
+	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+		t_addr = (struct sockaddr *)dst_addr;
+		t_addr->sa_family = AF_INET6;
+		addr6 = (struct sockaddr_in6 *)dst_addr;
+		ip = (char *)&addr6->sin6_addr;
+		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+	} else {
+		t_addr = (struct sockaddr *)dst_addr;
+		t_addr->sa_family = AF_INET;
+		addr = (struct sockaddr_in *)dst_addr;
+		ip = (char *)&addr->sin_addr;
+		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+	}
+
+	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+	vfree(dst_addr);
+	return ep;
+}
+
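+/*
+ * Boot-target DDB indexes are handled by the sysfs boot code; report
+ * QLA_ERROR for them so callers skip them, unless sysfs boot is disabled.
+ */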
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+	if (ql4xdisablesysfsboot)
+		return QLA_SUCCESS;
+	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+		return QLA_ERROR;
+	return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+					  struct ddb_entry *ddb_entry,
+					  uint16_t idx)
+{
+	uint16_t def_timeout;
+
+	ddb_entry->ddb_type = FLASH_DDB;
+	ddb_entry->fw_ddb_index = INVALID_ENTRY;
+	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+	ddb_entry->ha = ha;
+	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+	ddb_entry->chap_tbl_idx = INVALID_ENTRY;
+
+	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+	atomic_set(&ddb_entry->relogin_timer, 0);
+	atomic_set(&ddb_entry->relogin_retry_count, 0);
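+	/*
+	 * Clamp the firmware-provided default timeout: use it only if it
+	 * falls within (LOGIN_TOV, LOGIN_TOV * 10), else use LOGIN_TOV.
+	 */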
+	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+	ddb_entry->default_relogin_timeout =
+		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+		def_timeout : LOGIN_TOV;
+	ddb_entry->default_time2wait =
+		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+
+	if (ql4xdisablesysfsboot &&
+	    (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
+		set_bit(DF_BOOT_TGT, &ddb_entry->flags);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+	uint32_t idx = 0;
+	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+	uint32_t sts[MBOX_REG_COUNT];
+	uint32_t ip_state;
+	unsigned long wtime;
+	int ret;
+
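+	/*
+	 * Poll each IP interface until it reaches a settled address state
+	 * (or errors out) or IP_CONFIG_TOV expires; settled interfaces are
+	 * marked -1 and skipped on later passes.
+	 */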
+	wtime = jiffies + (HZ * IP_CONFIG_TOV);
+	do {
+		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+			if (ip_idx[idx] == -1)
+				continue;
+
+			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+			if (ret == QLA_ERROR) {
+				ip_idx[idx] = -1;
+				continue;
+			}
+
+			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Waiting for IP state for idx = %d, state = 0x%x\n",
+					  ip_idx[idx], ip_state));
+			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+			    ip_state == IP_ADDRSTATE_INVALID ||
+			    ip_state == IP_ADDRSTATE_PREFERRED ||
+			    ip_state == IP_ADDRSTATE_DEPRICATED ||
+			    ip_state == IP_ADDRSTATE_DISABLING)
+				ip_idx[idx] = -1;
+		}
+
+		/* Break if all IP states checked */
+		if ((ip_idx[0] == -1) &&
+		    (ip_idx[1] == -1) &&
+		    (ip_idx[2] == -1) &&
+		    (ip_idx[3] == -1))
+			break;
+		schedule_timeout_uninterruptible(HZ);
+	} while (time_after(wtime, jiffies));
+}
+
+static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
+				  struct dev_db_entry *flash_ddb_entry)
+{
+	uint16_t options = 0;
+	size_t ip_len = IP_ADDR_LEN;
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	if (options & DDB_OPT_IPV6_DEVICE)
+		ip_len = IPv6_ADDR_LEN;
+
+	if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
+		return QLA_ERROR;
+
+	if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
+		   sizeof(fw_ddb_entry->isid)))
+		return QLA_ERROR;
+
+	if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
+		   sizeof(fw_ddb_entry->port)))
+		return QLA_ERROR;
+
+	return QLA_SUCCESS;
+}
+
+static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
+				     struct dev_db_entry *fw_ddb_entry,
+				     uint32_t fw_idx, uint32_t *flash_index)
+{
+	struct dev_db_entry *flash_ddb_entry;
+	dma_addr_t flash_ddb_entry_dma;
+	uint32_t idx = 0;
+	int max_ddbs;
+	int ret = QLA_ERROR, status;
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+
+	flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+					 &flash_ddb_entry_dma);
+	if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "Out of memory\n");
+		goto exit_find_st_idx;
+	}
+
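+	/*
+	 * Fast path: the flash copy usually sits at the same index as the
+	 * firmware entry; fall back to a full scan of the flash DDBs if not.
+	 */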
+	status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+					  flash_ddb_entry_dma, fw_idx);
+	if (status == QLA_SUCCESS) {
+		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+		if (status == QLA_SUCCESS) {
+			*flash_index = fw_idx;
+			ret = QLA_SUCCESS;
+			goto exit_find_st_idx;
+		}
+	}
+
+	for (idx = 0; idx < max_ddbs; idx++) {
+		status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+						  flash_ddb_entry_dma, idx);
+		if (status == QLA_ERROR)
+			continue;
+
+		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+		if (status == QLA_SUCCESS) {
+			*flash_index = idx;
+			ret = QLA_SUCCESS;
+			goto exit_find_st_idx;
+		}
+	}
+
+	if (idx == max_ddbs)
+		ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
+			   fw_idx);
+
+exit_find_st_idx:
+	if (flash_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
+			      flash_ddb_entry_dma);
+
+	return ret;
+}
+
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+				  struct list_head *list_st)
+{
+	struct qla_ddb_index  *st_ddb_idx;
+	int max_ddbs;
+	int fw_idx_size;
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_dma;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint32_t flash_index = -1;
+	uint16_t conn_id = 0;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_st_list;
+	}
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+	fw_idx_size = sizeof(struct qla_ddb_index);
+
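+	/*
+	 * A DDB with an empty iscsi_name is a SendTargets (ST) entry; collect
+	 * each one along with the flash index that backs it.
+	 */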
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+					      NULL, &next_idx, &state,
+					      &conn_err, NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		/* Ignore DDB if invalid state (unassigned) */
+		if (state == DDB_DS_UNASSIGNED)
+			goto continue_next_st;
+
+		/* Check if ST, add to the list_st */
+		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+			goto continue_next_st;
+
+		st_ddb_idx = vzalloc(fw_idx_size);
+		if (!st_ddb_idx)
+			break;
+
+		ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
+						&flash_index);
+		if (ret == QLA_ERROR) {
+			ql4_printk(KERN_ERR, ha,
+				   "No flash entry for ST at idx [%d]\n", idx);
+			st_ddb_idx->flash_ddb_idx = idx;
+		} else {
+			ql4_printk(KERN_INFO, ha,
+				   "ST at idx [%d] is stored at flash [%d]\n",
+				   idx, flash_index);
+			st_ddb_idx->flash_ddb_idx = flash_index;
+		}
+
+		st_ddb_idx->fw_ddb_idx = idx;
+
+		list_add_tail(&st_ddb_idx->list, list_st);
+continue_next_st:
+		if (next_idx == 0)
+			break;
+	}
+
+exit_st_list:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed ddb to be removed
+ *
+ * Iterate over the list of DDBs and find and remove DDBs that are either in
+ * no connection active state or failed state
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+				      struct list_head *list_ddb)
+{
+	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+	uint32_t next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	int ret;
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+					      NULL, 0, NULL, &next_idx, &state,
+					      &conn_err, NULL, NULL);
+		if (ret == QLA_ERROR)
+			continue;
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) {
+			list_del_init(&ddb_idx->list);
+			vfree(ddb_idx);
+		}
+	}
+}
+
+static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
+					 struct ddb_entry *ddb_entry,
+					 struct dev_db_entry *fw_ddb_entry)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_session *sess;
+	uint32_t max_ddbs = 0;
+	uint16_t ddb_link = -1;
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+
+	cls_sess = ddb_entry->sess;
+	sess = cls_sess->dd_data;
+
+	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+	if (ddb_link < max_ddbs)
+		sess->discovery_parent_idx = ddb_link;
+	else
+		sess->discovery_parent_idx = DDB_NO_LINK;
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+				   struct dev_db_entry *fw_ddb_entry,
+				   int is_reset, uint16_t idx)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_session *sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_endpoint *ep;
+	uint16_t cmds_max = 32;
+	uint16_t conn_id = 0;
+	uint32_t initial_cmdsn = 0;
+	int ret = QLA_SUCCESS;
+
+	struct ddb_entry *ddb_entry = NULL;
+
+	/* Create the session object with INVALID_ENTRY; the target_id
+	 * gets set when we issue the login.
+	 */
+	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+				       cmds_max, sizeof(struct ddb_entry),
+				       sizeof(struct ql4_task_data),
+				       initial_cmdsn, INVALID_ENTRY);
+	if (!cls_sess) {
+		ret = QLA_ERROR;
+		goto exit_setup;
+	}
+
+	/*
+	 * iscsi_session_setup() takes a reference on the transport module;
+	 * call module_put() to decrement the reference count so the driver
+	 * can still be unloaded while these flash sessions exist.
+	 **/
+	module_put(qla4xxx_iscsi_transport.owner);
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ddb_entry->sess = cls_sess;
+
+	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+	       sizeof(struct dev_db_entry));
+
+	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
+
+	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+	if (!cls_conn) {
+		ret = QLA_ERROR;
+		goto exit_setup;
+	}
+
+	ddb_entry->conn = cls_conn;
+
+	/* Setup ep, for displaying attributes in sysfs */
+	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+	if (ep) {
+		ep->conn = cls_conn;
+		cls_conn->ep = ep;
+	} else {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+		ret = QLA_ERROR;
+		goto exit_setup;
+	}
+
+	/* Update sess/conn params */
+	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+	qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
+
+	if (is_reset == RESET_ADAPTER) {
+		iscsi_block_session(cls_sess);
+		/* Use the relogin path to discover new devices by
+		 * short-circuiting the logic of setting a timer to relogin -
+		 * instead, set the flags to initiate login right away.
+		 */
+		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+		set_bit(DF_RELOGIN, &ddb_entry->flags);
+	}
+
+exit_setup:
+	return ret;
+}
+
+static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
+				       struct list_head *list_ddb,
+				       struct dev_db_entry *fw_ddb_entry)
+{
+	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+	uint16_t ddb_link;
+
+	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+		if (ddb_idx->fw_ddb_idx == ddb_link) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Updating NT parent idx from [%d] to [%d]\n",
+					  ddb_link, ddb_idx->flash_ddb_idx));
+			fw_ddb_entry->ddb_link =
+					    cpu_to_le16(ddb_idx->flash_ddb_idx);
+			return;
+		}
+	}
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+				  struct list_head *list_nt,
+				  struct list_head *list_st,
+				  int is_reset)
+{
+	struct dev_db_entry *fw_ddb_entry;
+	struct ddb_entry *ddb_entry = NULL;
+	dma_addr_t fw_ddb_dma;
+	int max_ddbs;
+	int fw_idx_size;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint32_t ddb_idx = -1;
+	uint16_t conn_id = 0;
+	uint16_t ddb_link = -1;
+	struct qla_ddb_index  *nt_ddb_idx;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_nt_list;
+	}
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+	fw_idx_size = sizeof(struct qla_ddb_index);
+
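+	/*
+	 * NT (normal target) entries have a non-empty iscsi_name. On
+	 * INIT_ADAPTER only inactive/failed entries are set up for login; on
+	 * RESET_ADAPTER a session is created only if one does not already
+	 * exist.
+	 */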
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+					      NULL, &next_idx, &state,
+					      &conn_err, NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+			goto continue_next_nt;
+
+		/* Check if NT, then add it to the list */
+		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+			goto continue_next_nt;
+
+		ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+		if (ddb_link < max_ddbs)
+			qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
+
+		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) &&
+		    (is_reset == INIT_ADAPTER))
+			goto continue_next_nt;
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Adding DDB to session = 0x%x\n", idx));
+
+		if (is_reset == INIT_ADAPTER) {
+			nt_ddb_idx = vmalloc(fw_idx_size);
+			if (!nt_ddb_idx)
+				break;
+
+			nt_ddb_idx->fw_ddb_idx = idx;
+
+			/* Copy the original isid as it may get updated in
+			 * qla4xxx_update_isid(). We need the original isid in
+			 * qla4xxx_compare_tuple_ddb() to find duplicate
+			 * targets.
+			 */
+			memcpy(&nt_ddb_idx->flash_isid[0],
+			       &fw_ddb_entry->isid[0],
+			       sizeof(nt_ddb_idx->flash_isid));
+
+			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
+							  fw_ddb_entry);
+			if (ret == QLA_SUCCESS) {
+				/* free nt_ddb_idx and do not add to list_nt */
+				vfree(nt_ddb_idx);
+				goto continue_next_nt;
+			}
+
+			/* Copy the entry, including any updated isid */
+			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+			       sizeof(struct dev_db_entry));
+
+			list_add_tail(&nt_ddb_idx->list, list_nt);
+		} else if (is_reset == RESET_ADAPTER) {
+			ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
+							&ddb_idx);
+			if (ret == QLA_SUCCESS) {
+				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+								       ddb_idx);
+				if (ddb_entry != NULL)
+					qla4xxx_update_sess_disc_idx(ha,
+								     ddb_entry,
+								  fw_ddb_entry);
+				goto continue_next_nt;
+			}
+		}
+
+		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
+		if (ret == QLA_ERROR)
+			goto exit_nt_list;
+
+continue_next_nt:
+		if (next_idx == 0)
+			break;
+	}
+
+exit_nt_list:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
+				      struct list_head *list_nt,
+				      uint16_t target_id)
+{
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_dma;
+	int max_ddbs;
+	int fw_idx_size;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t conn_id = 0;
+	struct qla_ddb_index  *nt_ddb_idx;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_new_nt_list;
+	}
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+	fw_idx_size = sizeof(struct qla_ddb_index);
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+					      NULL, &next_idx, &state,
+					      &conn_err, NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		/* Check if NT, then add it to list */
+		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+			goto continue_next_new_nt;
+
+		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
+			goto continue_next_new_nt;
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Adding DDB to session = 0x%x\n", idx));
+
+		nt_ddb_idx = vmalloc(fw_idx_size);
+		if (!nt_ddb_idx)
+			break;
+
+		nt_ddb_idx->fw_ddb_idx = idx;
+
+		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
+		if (ret == QLA_SUCCESS) {
+			/* free nt_ddb_idx and do not add to list_nt */
+			vfree(nt_ddb_idx);
+			goto continue_next_new_nt;
+		}
+
+		if (target_id < max_ddbs)
+			fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
+
+		list_add_tail(&nt_ddb_idx->list, list_nt);
+
+		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+					      idx);
+		if (ret == QLA_ERROR)
+			goto exit_new_nt_list;
+
+continue_next_new_nt:
+		if (next_idx == 0)
+			break;
+	}
+
+exit_new_nt_list:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
+ * @dev: dev associated with the sysfs entry
+ * @data: pointer to flashnode session object
+ *
+ * Returns:
+ *	1: if flashnode entry is non-persistent
+ *	0: if flashnode entry is persistent
+ **/
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+{
+	struct iscsi_bus_flash_session *fnode_sess;
+
+	if (!iscsi_flashnode_bus_match(dev, NULL))
+		return 0;
+
+	fnode_sess = iscsi_dev_to_flash_session(dev);
+
+	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
+ * @ha: pointer to host
+ * @fw_ddb_entry: flash ddb data
+ * @idx: target index
+ * @user: if set then this call is made from userland else from kernel
+ *
+ * Returns:
+ * On success: QLA_SUCCESS
+ * On failure: QLA_ERROR
+ *
+ * This creates separate sysfs entries for the session and connection
+ * attributes of the given fw ddb entry.
+ * If this is invoked as a result of a userspace call then the entry is
+ * marked as non-persistent using the flash_state field.
+ **/
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+					struct dev_db_entry *fw_ddb_entry,
+					uint16_t *idx, int user)
+{
+	struct iscsi_bus_flash_session *fnode_sess = NULL;
+	struct iscsi_bus_flash_conn *fnode_conn = NULL;
+	int rc = QLA_ERROR;
+
+	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
+						 &qla4xxx_iscsi_transport, 0);
+	if (!fnode_sess) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
+			   __func__, *idx, ha->host_no);
+		goto exit_tgt_create;
+	}
+
+	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
+						 &qla4xxx_iscsi_transport, 0);
+	if (!fnode_conn) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
+			   __func__, *idx, ha->host_no);
+		goto free_sess;
+	}
+
+	if (user) {
+		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
+	} else {
+		fnode_sess->flash_state = DEV_DB_PERSISTENT;
+
+		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
+			fnode_sess->is_boot_target = 1;
+		else
+			fnode_sess->is_boot_target = 0;
+	}
+
+	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
+					   fw_ddb_entry);
+	if (rc)
+		goto free_sess;
+
+	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+		   __func__, fnode_sess->dev.kobj.name);
+
+	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+		   __func__, fnode_conn->dev.kobj.name);
+
+	return QLA_SUCCESS;
+
+free_sess:
+	iscsi_destroy_flashnode_sess(fnode_sess);
+
+exit_tgt_create:
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
+ * @shost: pointer to host
+ * @buf: type of ddb entry (ipv4/ipv6)
+ * @len: length of buf
+ *
+ * This creates a new ddb entry in flash by finding the first free index and
+ * storing a default ddb there, and then creates a sysfs entry for the new
+ * ddb entry.
+ **/
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+				 int len)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	struct device *dev;
+	uint16_t idx = 0;
+	uint16_t max_ddbs = 0;
+	uint32_t options = 0;
+	uint32_t rval = QLA_ERROR;
+
+	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
+	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
+				  __func__));
+		goto exit_ddb_add;
+	}
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+				     MAX_DEV_DB_ENTRIES;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		goto exit_ddb_add;
+	}
+
+	dev = iscsi_find_flashnode_sess(ha->host, NULL,
+					qla4xxx_sysfs_ddb_is_non_persistent);
+	if (dev) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: A non-persistent entry %s found\n",
+			   __func__, dev->kobj.name);
+		put_device(dev);
+		goto exit_ddb_add;
+	}
+
+	/* Index 0 and 1 are reserved for boot target entries */
+	for (idx = 2; idx < max_ddbs; idx++) {
+		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
+					     fw_ddb_entry_dma, idx))
+			break;
+	}
+
+	if (idx == max_ddbs)
+		goto exit_ddb_add;
+
+	if (!strncasecmp("ipv6", buf, 4))
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (rval == QLA_ERROR)
+		goto exit_ddb_add;
+
+	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
+
+exit_ddb_add:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	if (rval == QLA_SUCCESS)
+		return idx;
+	else
+		return -EIO;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This writes the contents of target ddb buffer to Flash with a valid cookie
+ * value in order to make the ddb entry persistent.
+ **/
+static int  qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t options = 0;
+	int rval = 0;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		rval = -ENOMEM;
+		goto exit_ddb_apply;
+	}
+
+	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (rval == QLA_ERROR)
+		goto exit_ddb_apply;
+
+	dev_db_start_offset += (fnode_sess->target_id *
+				sizeof(*fw_ddb_entry));
+
+	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
+
+	if (rval == QLA_SUCCESS) {
+		fnode_sess->flash_state = DEV_DB_PERSISTENT;
+		ql4_printk(KERN_INFO, ha,
+			   "%s: flash node %u of host %lu written to flash\n",
+			   __func__, fnode_sess->target_id, ha->host_no);
+	} else {
+		rval = -EIO;
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Error while writing flash node %u of host %lu to flash\n",
+			   __func__, fnode_sess->target_id, ha->host_no);
+	}
+
+exit_ddb_apply:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return rval;
+}
+
+static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
+					   struct dev_db_entry *fw_ddb_entry,
+					   uint16_t idx)
+{
+	struct dev_db_entry *ddb_entry = NULL;
+	dma_addr_t ddb_entry_dma;
+	unsigned long wtime;
+	uint32_t mbx_sts = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t tmo = 0;
+	int ret = 0;
+
+	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+				       &ddb_entry_dma, GFP_KERNEL);
+	if (!ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		return QLA_ERROR;
+	}
+
+	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
+
+	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
+	if (ret != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to set ddb entry for index %d\n",
+				  __func__, idx));
+		goto exit_ddb_conn_open;
+	}
+
+	qla4xxx_conn_open(ha, idx);
+
+	/* To ensure that sendtargets is done, wait for at least 12 secs */
+	tmo = ((ha->def_timeout > LOGIN_TOV) &&
+	       (ha->def_timeout < LOGIN_TOV * 10) ?
+	       ha->def_timeout : LOGIN_TOV);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Default time to wait for login to ddb %d\n", tmo));
+
+	wtime = jiffies + (HZ * tmo);
+	do {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+					      NULL, &state, &conn_err, NULL,
+					      NULL);
+		if (ret == QLA_ERROR)
+			continue;
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED)
+			break;
+
+		schedule_timeout_uninterruptible(HZ / 10);
+	} while (time_after(wtime, jiffies));
+
+exit_ddb_conn_open:
+	if (ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+				  ddb_entry, ddb_entry_dma);
+	return ret;
+}
+
+static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
+				struct dev_db_entry *fw_ddb_entry,
+				uint16_t target_id)
+{
+	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+	struct list_head list_nt;
+	uint16_t ddb_index;
+	int ret = 0;
+
+	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: A discovery already in progress!\n", __func__);
+		return QLA_ERROR;
+	}
+
+	INIT_LIST_HEAD(&list_nt);
+
+	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+
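+	/*
+	 * Reserve a scratch DDB index for the SendTargets session, run the
+	 * discovery, then release the firmware DDB slots it populated.
+	 */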
+	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+	if (ret == QLA_ERROR)
+		goto exit_login_st_clr_bit;
+
+	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
+	if (ret == QLA_ERROR)
+		goto exit_login_st;
+
+	qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
+		list_del_init(&ddb_idx->list);
+		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
+		vfree(ddb_idx);
+	}
+
+exit_login_st:
+	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha,
+			   "Unable to clear DDB index = 0x%x\n", ddb_index);
+	}
+
+	clear_bit(ddb_index, ha->ddb_idx_map);
+
+exit_login_st_clr_bit:
+	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+	return ret;
+}
+
+static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
+				struct dev_db_entry *fw_ddb_entry,
+				uint16_t idx)
+{
+	int ret = QLA_ERROR;
+
+	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
+	if (ret != QLA_SUCCESS)
+		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+					      idx);
+	else
+		ret = -EPERM;
+
+	return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_login - Login to the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This logs in to the specified target
+ **/
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+				   struct iscsi_bus_flash_conn *fnode_conn)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t options = 0;
+	int ret = 0;
+
+	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Target info is not persistent\n", __func__);
+		ret = -EIO;
+		goto exit_ddb_login;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		ret = -ENOMEM;
+		goto exit_ddb_login;
+	}
+
+	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (ret == QLA_ERROR)
+		goto exit_ddb_login;
+
+	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
+					   fnode_sess->target_id);
+	else
+		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
+					   fnode_sess->target_id);
+
+	if (ret > 0)
+		ret = -EIO;
+
+exit_ddb_login:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
+ * @cls_sess: pointer to session to be logged out
+ *
+ * This performs a session logout from the specified target.
+ **/
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry = NULL;
+	struct scsi_qla_host *ha;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	unsigned long flags;
+	unsigned long wtime;
+	uint32_t ddb_state;
+	int options;
+	int ret = 0;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (ddb_entry->ddb_type != FLASH_DDB) {
+		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
+			   __func__);
+		ret = -ENXIO;
+		goto exit_ddb_logout;
+	}
+
+	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Logout from boot target entry is not permitted.\n",
+			   __func__);
+		ret = -EPERM;
+		goto exit_ddb_logout;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
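+	/*
+	 * If relogin was already disabled for this entry, skip straight to
+	 * the logout sequence.
+	 */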
+	if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+		goto ddb_logout_init;
+
+	ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+				      fw_ddb_entry, fw_ddb_entry_dma,
+				      NULL, NULL, &ddb_state, NULL,
+				      NULL, NULL);
+	if (ret == QLA_ERROR)
+		goto ddb_logout_init;
+
+	if (ddb_state == DDB_DS_SESSION_ACTIVE)
+		goto ddb_logout_init;
+
+	/* Wait until the next relogin is triggered via DF_RELOGIN, and
+	 * clear DF_RELOGIN to avoid invoking any further relogins.
+	 */
+	wtime = jiffies + (HZ * RELOGIN_TOV);
+	do {
+		if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
+			goto ddb_logout_init;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while ((time_after(wtime, jiffies)));
+
+ddb_logout_init:
+	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+	atomic_set(&ddb_entry->relogin_timer, 0);
+
+	options = LOGOUT_OPTION_CLOSE_SESSION;
+	qla4xxx_session_logout_ddb(ha, ddb_entry, options);
+
+	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+	wtime = jiffies + (HZ * LOGOUT_TOV);
+	do {
+		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+					      fw_ddb_entry, fw_ddb_entry_dma,
+					      NULL, NULL, &ddb_state, NULL,
+					      NULL, NULL);
+		if (ret == QLA_ERROR)
+			goto ddb_logout_clr_sess;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto ddb_logout_clr_sess;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while ((time_after(wtime, jiffies)));
+
+ddb_logout_clr_sess:
+	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+	/*
+	 * We decremented the driver's reference count when we set up the
+	 * session so that driver unload stays seamless without actually
+	 * destroying the session; take the reference back before teardown.
+	 **/
+	try_module_get(qla4xxx_iscsi_transport.owner);
+	iscsi_destroy_endpoint(ddb_entry->conn->ep);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qla4xxx_free_ddb(ha, ddb_entry);
+	clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	iscsi_session_teardown(ddb_entry->sess);
+
+	clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
+	ret = QLA_SUCCESS;
+
+exit_ddb_logout:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout - Logout from the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This performs a logout from the specified target.
+ **/
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct ql4_tuple_ddb *flash_tddb = NULL;
+	struct ql4_tuple_ddb *tmp_tddb = NULL;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	struct ddb_entry *ddb_entry = NULL;
+	dma_addr_t fw_ddb_dma;
+	uint32_t next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t conn_id = 0;
+	int idx, index;
+	int status, ret = 0;
+
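+	/*
+	 * Build an identifying tuple from the flash node, then scan the live
+	 * flash sessions for a match and log the matching session out.
+	 */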
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Out of memory\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	flash_tddb = vzalloc(sizeof(*flash_tddb));
+	if (!flash_tddb) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: Memory allocation failed.\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+	if (!tmp_tddb) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: Memory allocation failed.\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	if (!fnode_sess->targetname) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Cannot logout from SendTarget entry\n",
+			   __func__);
+		ret = -EPERM;
+		goto exit_ddb_logout;
+	}
+
+	if (fnode_sess->is_boot_target) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Logout from boot target entry is not permitted.\n",
+			   __func__);
+		ret = -EPERM;
+		goto exit_ddb_logout;
+	}
+
+	strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+		ISCSI_NAME_SIZE);
+
+	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
+	else
+		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
+
+	flash_tddb->tpgt = fnode_sess->tpgt;
+	flash_tddb->port = fnode_conn->port;
+
+	COPY_ISID(flash_tddb->isid, fnode_sess->isid);
+
+	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		if (ddb_entry->ddb_type != FLASH_DDB)
+			continue;
+
+		index = ddb_entry->sess->target_id;
+		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
+						 fw_ddb_dma, NULL, &next_idx,
+						 &state, &conn_err, NULL,
+						 &conn_id);
+		if (status == QLA_ERROR) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
+
+		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
+						   true);
+		if (status == QLA_SUCCESS) {
+			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
+			break;
+		}
+	}
+
+	if (idx == MAX_DDB_ENTRIES)
+		ret = -ESRCH;
+
+exit_ddb_logout:
+	if (flash_tddb)
+		vfree(flash_tddb);
+	if (tmp_tddb)
+		vfree(tmp_tddb);
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+	return ret;
+}
+
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+			    int param, char *buf)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_bus_flash_conn *fnode_conn;
+	struct ql4_chap_table chap_tbl;
+	struct device *dev;
+	int parent_type;
+	int rc = 0;
+
+	dev = iscsi_find_flashnode_conn(fnode_sess);
+	if (!dev)
+		return -EIO;
+
+	fnode_conn = iscsi_dev_to_flash_conn(dev);
+
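+	/*
+	 * Format the requested flashnode attribute from the cached
+	 * session/conn objects into buf and return the byte count.
+	 */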
+	switch (param) {
+	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
+		break;
+	case ISCSI_FLASHNODE_PORTAL_TYPE:
+		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
+		break;
+	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_SESS:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
+		break;
+	case ISCSI_FLASHNODE_ENTRY_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
+		break;
+	case ISCSI_FLASHNODE_HDR_DGST_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
+		break;
+	case ISCSI_FLASHNODE_DATA_DGST_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
+		break;
+	case ISCSI_FLASHNODE_IMM_DATA_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
+		break;
+	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
+		break;
+	case ISCSI_FLASHNODE_DATASEQ_INORDER:
+		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
+		break;
+	case ISCSI_FLASHNODE_PDU_INORDER:
+		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
+		break;
+	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
+		break;
+	case ISCSI_FLASHNODE_SNACK_REQ_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
+		break;
+	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
+		break;
+	case ISCSI_FLASHNODE_ERL:
+		rc = sprintf(buf, "%u\n", fnode_sess->erl);
+		break;
+	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
+		break;
+	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
+		break;
+	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
+		break;
+	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
+		break;
+	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
+		break;
+	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
+		break;
+	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
+		break;
+	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
+		break;
+	case ISCSI_FLASHNODE_FIRST_BURST:
+		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
+		break;
+	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
+		break;
+	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
+		break;
+	case ISCSI_FLASHNODE_MAX_R2T:
+		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
+		break;
+	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
+		break;
+	case ISCSI_FLASHNODE_ISID:
+		rc = sprintf(buf, "%pm\n", fnode_sess->isid);
+		break;
+	case ISCSI_FLASHNODE_TSID:
+		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
+		break;
+	case ISCSI_FLASHNODE_PORT:
+		rc = sprintf(buf, "%d\n", fnode_conn->port);
+		break;
+	case ISCSI_FLASHNODE_MAX_BURST:
+		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
+		break;
+	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+		rc = sprintf(buf, "%u\n",
+			     fnode_sess->default_taskmgmt_timeout);
+		break;
+	case ISCSI_FLASHNODE_IPADDR:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
+		else
+			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
+		break;
+	case ISCSI_FLASHNODE_ALIAS:
+		if (fnode_sess->targetalias)
+			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%pI6\n",
+				     fnode_conn->redirect_ipaddr);
+		else
+			rc = sprintf(buf, "%pI4\n",
+				     fnode_conn->redirect_ipaddr);
+		break;
+	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
+		break;
+	case ISCSI_FLASHNODE_LOCAL_PORT:
+		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
+		break;
+	case ISCSI_FLASHNODE_IPV4_TOS:
+		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
+		break;
+	case ISCSI_FLASHNODE_IPV6_TC:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%u\n",
+				     fnode_conn->ipv6_traffic_class);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
+		break;
+	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%pI6\n",
+				     fnode_conn->link_local_ipv6_addr);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+		if (fnode_sess->discovery_parent_type == DDB_ISNS)
+			parent_type = ISCSI_DISC_PARENT_ISNS;
+		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
+			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+		else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+			parent_type = ISCSI_DISC_PARENT_SENDTGT;
+		else
+			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+
+		rc = sprintf(buf, "%s\n",
+			     iscsi_get_discovery_parent_name(parent_type));
+		break;
+	case ISCSI_FLASHNODE_NAME:
+		if (fnode_sess->targetname)
+			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_TPGT:
+		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
+		break;
+	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
+		break;
+	case ISCSI_FLASHNODE_TCP_RECV_WSF:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
+		break;
+	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
+		break;
+	case ISCSI_FLASHNODE_USERNAME:
+		if (fnode_sess->chap_auth_en) {
+			qla4xxx_get_uni_chap_at_index(ha,
+						      chap_tbl.name,
+						      chap_tbl.secret,
+						      fnode_sess->chap_out_idx);
+			rc = sprintf(buf, "%s\n", chap_tbl.name);
+		} else {
+			rc = sprintf(buf, "\n");
+		}
+		break;
+	case ISCSI_FLASHNODE_PASSWORD:
+		if (fnode_sess->chap_auth_en) {
+			qla4xxx_get_uni_chap_at_index(ha,
+						      chap_tbl.name,
+						      chap_tbl.secret,
+						      fnode_sess->chap_out_idx);
+			rc = sprintf(buf, "%s\n", chap_tbl.secret);
+		} else {
+			rc = sprintf(buf, "\n");
+		}
+		break;
+	case ISCSI_FLASHNODE_STATSN:
+		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
+		break;
+	case ISCSI_FLASHNODE_EXP_STATSN:
+		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
+		break;
+	case ISCSI_FLASHNODE_IS_BOOT_TGT:
+		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
+		break;
+	default:
+		rc = -ENOSYS;
+		break;
+	}
+
+	put_device(dev);
+	return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ * @data: Parameters and their values to update
+ * @len: len of data
+ *
+ * This sets the parameter of flash ddb entry and writes them to flash
+ **/
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+			    struct iscsi_bus_flash_conn *fnode_conn,
+			    void *data, int len)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_flashnode_param_info *fnode_param;
+	struct ql4_chap_table chap_tbl;
+	struct nlattr *attr;
+	uint16_t chap_out_idx = INVALID_ENTRY;
+	int rc = QLA_ERROR;
+	uint32_t rem = len;
+
+	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
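+	/* Walk the packed netlink attributes; each one carries an
+	 * iscsi_flashnode_param_info with the parameter ID and its new
+	 * value. An unknown parameter ID aborts the update.
+	 */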
+	nla_for_each_attr(attr, data, len, rem) {
+		fnode_param = nla_data(attr);
+
+		switch (fnode_param->param) {
+		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+			fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_PORTAL_TYPE:
+			memcpy(fnode_sess->portal_type, fnode_param->value,
+			       strlen(fnode_sess->portal_type));
+			break;
+		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+			fnode_sess->auto_snd_tgt_disable =
+							fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_SESS:
+			fnode_sess->discovery_sess = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_ENTRY_EN:
+			fnode_sess->entry_state = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_HDR_DGST_EN:
+			fnode_conn->hdrdgst_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DATA_DGST_EN:
+			fnode_conn->datadgst_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IMM_DATA_EN:
+			fnode_sess->imm_data_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+			fnode_sess->initial_r2t_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DATASEQ_INORDER:
+			fnode_sess->dataseq_inorder_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_PDU_INORDER:
+			fnode_sess->pdu_inorder_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+			fnode_sess->chap_auth_en = fnode_param->value[0];
+			/* Invalidate chap index if chap auth is disabled */
+			if (!fnode_sess->chap_auth_en)
+				fnode_sess->chap_out_idx = INVALID_ENTRY;
+
+			break;
+		case ISCSI_FLASHNODE_SNACK_REQ_EN:
+			fnode_conn->snack_req_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+			fnode_sess->discovery_logout_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+			fnode_sess->bidi_chap_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+			fnode_sess->discovery_auth_optional =
+							fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_ERL:
+			fnode_sess->erl = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+			fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+			fnode_conn->tcp_nagle_disable = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+			fnode_conn->tcp_wsf_disable = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+			fnode_conn->tcp_timer_scale = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+			fnode_conn->tcp_timestamp_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+			fnode_conn->fragment_disable = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+			fnode_conn->max_recv_dlength =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+			fnode_conn->max_xmit_dlength =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_FIRST_BURST:
+			fnode_sess->first_burst =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+			fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+			fnode_sess->time2retain =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_MAX_R2T:
+			fnode_sess->max_r2t =
+					*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+			fnode_conn->keepalive_timeout =
+				*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_ISID:
+			memcpy(fnode_sess->isid, fnode_param->value,
+			       sizeof(fnode_sess->isid));
+			break;
+		case ISCSI_FLASHNODE_TSID:
+			fnode_sess->tsid = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_PORT:
+			fnode_conn->port = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_MAX_BURST:
+			fnode_sess->max_burst = *(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+			fnode_sess->default_taskmgmt_timeout =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_IPADDR:
+			memcpy(fnode_conn->ipaddress, fnode_param->value,
+			       IPv6_ADDR_LEN);
+			break;
+		case ISCSI_FLASHNODE_ALIAS:
+			rc = iscsi_switch_str_param(&fnode_sess->targetalias,
+						    (char *)fnode_param->value);
+			break;
+		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+			memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
+			       IPv6_ADDR_LEN);
+			break;
+		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+			fnode_conn->max_segment_size =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_LOCAL_PORT:
+			fnode_conn->local_port =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_IPV4_TOS:
+			fnode_conn->ipv4_tos = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IPV6_TC:
+			fnode_conn->ipv6_traffic_class = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+			fnode_conn->ipv6_flow_label = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_NAME:
+			rc = iscsi_switch_str_param(&fnode_sess->targetname,
+						    (char *)fnode_param->value);
+			break;
+		case ISCSI_FLASHNODE_TPGT:
+			fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+			memcpy(fnode_conn->link_local_ipv6_addr,
+			       fnode_param->value, IPv6_ADDR_LEN);
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+			fnode_sess->discovery_parent_idx =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+			fnode_conn->tcp_xmit_wsf =
+						*(uint8_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_TCP_RECV_WSF:
+			fnode_conn->tcp_recv_wsf =
+						*(uint8_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_STATSN:
+			fnode_conn->statsn = *(uint32_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_EXP_STATSN:
+			fnode_conn->exp_statsn =
+						*(uint32_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+			chap_out_idx = *(uint16_t *)fnode_param->value;
+			if (!qla4xxx_get_uni_chap_at_index(ha,
+							   chap_tbl.name,
+							   chap_tbl.secret,
+							   chap_out_idx)) {
+				fnode_sess->chap_out_idx = chap_out_idx;
+				/* Enable chap auth if chap index is valid */
+				fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
+			}
+			break;
+		default:
+			ql4_printk(KERN_ERR, ha,
+				   "%s: No such sysfs attribute\n", __func__);
+			rc = -ENOSYS;
+			goto exit_set_param;
+		}
+	}
+
+	rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
+
+exit_set_param:
+	return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ *
+ * This invalidates the flash ddb entry at the given index
+ **/
+static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	uint32_t dev_db_start_offset;
+	uint32_t dev_db_end_offset;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint16_t *ddb_cookie = NULL;
+	size_t ddb_size = 0;
+	void *pddb = NULL;
+	int target_id;
+	int rc = 0;
+
+	if (fnode_sess->is_boot_target) {
+		rc = -EPERM;
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Deletion of boot target entry is not permitted.\n",
+				  __func__));
+		goto exit_ddb_del;
+	}
+
+	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
+		goto sysfs_ddb_del;
+
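+	/* Compute the flash offset of this DDB entry. ISP4xxx keeps a flat
+	 * table of full dev_db_entry structures; ISP8xxx splits the FLT DDB
+	 * region between the two ports, and only the entry's cookie word is
+	 * rewritten to invalidate it.
+	 */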
+	if (is_qla40XX(ha)) {
+		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+		dev_db_end_offset = FLASH_OFFSET_DB_END;
+		dev_db_start_offset += (fnode_sess->target_id *
+				       sizeof(*fw_ddb_entry));
+		ddb_size = sizeof(*fw_ddb_entry);
+	} else {
+		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+				      (ha->hw.flt_region_ddb << 2);
+		/* flt_ddb_size is the DDB table size for both ports, so
+		 * divide it by 2 to calculate the offset of the second port
+		 */
+		if (ha->port_num == 1)
+			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+		dev_db_end_offset = dev_db_start_offset +
+				    (ha->hw.flt_ddb_size / 2);
+
+		dev_db_start_offset += (fnode_sess->target_id *
+				       sizeof(*fw_ddb_entry));
+		dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
+
+		ddb_size = sizeof(*ddb_cookie);
+	}
+
+	DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
+			  __func__, dev_db_start_offset, dev_db_end_offset));
+
+	if (dev_db_start_offset > dev_db_end_offset) {
+		rc = -EIO;
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
+				  __func__, fnode_sess->target_id));
+		goto exit_ddb_del;
+	}
+
+	pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
+				  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!pddb) {
+		rc = -ENOMEM;
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		goto exit_ddb_del;
+	}
+
+	if (is_qla40XX(ha)) {
+		fw_ddb_entry = pddb;
+		memset(fw_ddb_entry, 0, ddb_size);
+		ddb_cookie = &fw_ddb_entry->cookie;
+	} else {
+		ddb_cookie = pddb;
+	}
+
+	/* invalidate the cookie */
+	*ddb_cookie = 0xFFEE;
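+	/* The read-modify-write commit touches only this entry's bytes,
+	 * leaving the neighbouring flash DDB entries intact.
+	 */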
+	qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+			  ddb_size, FLASH_OPT_RMW_COMMIT);
+
+sysfs_ddb_del:
+	target_id = fnode_sess->target_id;
+	iscsi_destroy_flashnode_sess(fnode_sess);
+	ql4_printk(KERN_INFO, ha,
+		   "%s: session and conn entries for flashnode %u of host %lu deleted\n",
+		   __func__, target_id, ha->host_no);
+exit_ddb_del:
+	if (pddb)
+		dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
+				  fw_ddb_entry_dma);
+	return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
+ * @ha: pointer to adapter structure
+ *
+ * Export the firmware DDB for all send targets and normal targets to sysfs.
+ **/
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint16_t max_ddbs;
+	uint16_t idx = 0;
+	int ret = QLA_SUCCESS;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+					  sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		return -ENOMEM;
+	}
+
+	max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+				    MAX_DEV_DB_ENTRIES;
+
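+	/* Walk every flash DDB index; entries that cannot be read from
+	 * flash are skipped rather than treated as fatal.
+	 */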
+	for (idx = 0; idx < max_ddbs; idx++) {
+		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
+					     idx))
+			continue;
+
+		ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
+		if (ret) {
+			ret = -EIO;
+			break;
+		}
+	}
+
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
+			  fw_ddb_entry_dma);
+
+	return ret;
+}
+
+static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
+{
+	iscsi_destroy_all_flashnode(ha->host);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from firmware DDBs, issue send targets
+ * using connection open, then create the list of normal targets (nt)
+ * from firmware DDBs. Based on the list of nt setup session and connection
+ * objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+	uint16_t tmo = 0;
+	struct list_head list_st, list_nt;
+	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+	unsigned long wtime;
+
+	if (!test_bit(AF_LINK_UP, &ha->flags)) {
+		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+		ha->is_reset = is_reset;
+		return;
+	}
+
+	INIT_LIST_HEAD(&list_st);
+	INIT_LIST_HEAD(&list_nt);
+
+	qla4xxx_build_st_list(ha, &list_st);
+
+	/* Before issuing conn open mbox, ensure all IP states are configured.
+	 * Note: conn open fails if IPs are not configured.
+	 */
+	qla4xxx_wait_for_ip_configuration(ha);
+
+	/* Go through the STs and fire the sendtargets by issuing conn open mbx */
+	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+	}
+
+	/* Wait to ensure all sendtargets are done for min 12 sec wait */
+	tmo = ((ha->def_timeout > LOGIN_TOV) &&
+	       (ha->def_timeout < LOGIN_TOV * 10) ?
+	       ha->def_timeout : LOGIN_TOV);
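+	/* The firmware default timeout is honored only when it lies between
+	 * LOGIN_TOV and LOGIN_TOV * 10; anything outside that window falls
+	 * back to LOGIN_TOV.
+	 */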
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Default time to wait for build ddb %d\n", tmo));
+
+	wtime = jiffies + (HZ * tmo);
+	do {
+		if (list_empty(&list_st))
+			break;
+
+		qla4xxx_remove_failed_ddb(ha, &list_st);
+		schedule_timeout_uninterruptible(HZ / 10);
+	} while (time_after(wtime, jiffies));
+
+	qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
+
+	qla4xxx_free_ddb_list(&list_st);
+	qla4xxx_free_ddb_list(&list_nt);
+
+	qla4xxx_free_ddb_index(ha);
+}
+
+/**
+ * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login response
+ * @ha: pointer to adapter structure
+ *
+ * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
+ * set in DDB and we will wait for login response of boot targets during
+ * probe.
+ **/
+static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
+{
+	struct ddb_entry *ddb_entry;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	unsigned long wtime;
+	uint32_t ddb_state;
+	int max_ddbs, idx, ret;
+
+	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				    MAX_DEV_DB_ENTRIES;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto exit_login_resp;
+	}
+
+	wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
+
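+	/* Poll each boot-target DDB until the firmware reports the session
+	 * ACTIVE or FAILED, or until BOOT_LOGIN_RESP_TOV expires.
+	 */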
+	for (idx = 0; idx < max_ddbs; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "%s: DDB index [%d]\n", __func__,
+					  ddb_entry->fw_ddb_index));
+			do {
+				ret = qla4xxx_get_fwddb_entry(ha,
+						ddb_entry->fw_ddb_index,
+						fw_ddb_entry, fw_ddb_entry_dma,
+						NULL, NULL, &ddb_state, NULL,
+						NULL, NULL);
+				if (ret == QLA_ERROR)
+					goto exit_login_resp;
+
+				if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
+				    (ddb_state == DDB_DS_SESSION_FAILED))
+					break;
+
+				schedule_timeout_uninterruptible(HZ);
+
+			} while ((time_after(wtime, jiffies)));
+
+			if (!time_after(wtime, jiffies)) {
+				DEBUG2(ql4_printk(KERN_INFO, ha,
+						  "%s: Login response wait timer expired\n",
+						  __func__));
+				goto exit_login_resp;
+			}
+		}
+	}
+
+exit_login_resp:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/**
+ * qla4xxx_probe_adapter - callback function to probe HBA
+ * @pdev: pointer to pci_dev structure
+ * @pci_device_id: pointer to pci_device entry
+ *
+ * This routine will probe for Qlogic 4xxx iSCSI host adapters.
+ * It returns zero if successful. It also initializes all data necessary for
+ * the driver.
+ **/
+static int qla4xxx_probe_adapter(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	int ret = -ENODEV, status;
+	struct Scsi_Host *host;
+	struct scsi_qla_host *ha;
+	uint8_t init_retry_count = 0;
+	char buf[34];
+	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
+	uint32_t dev_state;
+
+	if (pci_enable_device(pdev))
+		return -1;
+
+	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
+	if (host == NULL) {
+		printk(KERN_WARNING
+		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
+		goto probe_disable_device;
+	}
+
+	/* Clear our data area */
+	ha = to_qla_host(host);
+	memset(ha, 0, sizeof(*ha));
+
+	/* Save the information from PCI BIOS.	*/
+	ha->pdev = pdev;
+	ha->host = host;
+	ha->host_no = host->host_no;
+	ha->func_num = PCI_FUNC(ha->pdev->devfn);
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	/* Setup Runtime configurable options */
+	if (is_qla8022(ha)) {
+		ha->isp_ops = &qla4_82xx_isp_ops;
+		ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
+		ha->qdr_sn_window = -1;
+		ha->ddr_mn_window = -1;
+		ha->curr_window = 255;
+		nx_legacy_intr = &legacy_intr[ha->func_num];
+		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+		ha->nx_legacy_intr.tgt_status_reg =
+			nx_legacy_intr->tgt_status_reg;
+		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+	} else if (is_qla8032(ha) || is_qla8042(ha)) {
+		ha->isp_ops = &qla4_83xx_isp_ops;
+		ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
+	} else {
+		ha->isp_ops = &qla4xxx_isp_ops;
+	}
+
+	if (is_qla80XX(ha)) {
+		rwlock_init(&ha->hw_lock);
+		ha->pf_bit = ha->func_num << 16;
+		/* Set EEH reset type to fundamental if required by hba */
+		pdev->needs_freset = 1;
+	}
+
+	/* Configure PCI I/O space. */
+	ret = ha->isp_ops->iospace_config(ha);
+	if (ret)
+		goto probe_failed_ioconfig;
+
+	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
+		   pdev->device, pdev->irq, ha->reg);
+
+	qla4xxx_config_dma_addressing(ha);
+
+	/* Initialize lists and spinlocks. */
+	INIT_LIST_HEAD(&ha->free_srb_q);
+
+	mutex_init(&ha->mbox_sem);
+	mutex_init(&ha->chap_sem);
+	init_completion(&ha->mbx_intr_comp);
+	init_completion(&ha->disable_acb_comp);
+	init_completion(&ha->idc_comp);
+	init_completion(&ha->link_up_comp);
+
+	spin_lock_init(&ha->hardware_lock);
+	spin_lock_init(&ha->work_lock);
+
+	/* Initialize work list */
+	INIT_LIST_HEAD(&ha->work_list);
+
+	/* Allocate dma buffers */
+	if (qla4xxx_mem_alloc(ha)) {
+		ql4_printk(KERN_WARNING, ha,
+		    "[ERROR] Failed to allocate memory for adapter\n");
+
+		ret = -ENOMEM;
+		goto probe_failed;
+	}
+
+	host->cmd_per_lun = 3;
+	host->max_channel = 0;
+	host->max_lun = MAX_LUNS - 1;
+	host->max_id = MAX_TARGETS;
+	host->max_cmd_len = IOCB_MAX_CDB_LEN;
+	host->can_queue = MAX_SRBS;
+	host->transportt = qla4xxx_scsi_transport;
+
+	pci_set_drvdata(pdev, ha);
+
+	ret = scsi_add_host(host, &pdev->dev);
+	if (ret)
+		goto probe_failed;
+
+	if (is_qla80XX(ha))
+		qla4_8xxx_get_flash_info(ha);
+
+	if (is_qla8032(ha) || is_qla8042(ha)) {
+		qla4_83xx_read_reset_template(ha);
+		/*
+		 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
+		 * to NEED_RESET. But if NEED_RESET is set, drivers should
+		 * honor the reset.
+		 */
+		if (ql4xdontresethba == 1)
+			qla4_83xx_set_idc_dontreset(ha);
+	}
+
+	/*
+	 * Initialize the host adapter request/response queues and firmware.
+	 * NOTE: interrupts are enabled upon successful completion.
+	 */
+	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+
+	/* Don't retry adapter initialization if IRQ allocation failed */
+	if (is_qla80XX(ha) && (status == QLA_ERROR))
+		goto skip_retry_init;
+
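+	/* Retry adapter bring-up a bounded number of times, resetting the
+	 * chip between attempts. ISP8xxx gives up early once the hardware
+	 * reports the FAILED device state.
+	 */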
+	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
+	    init_retry_count++ < MAX_INIT_RETRIES) {
+
+		if (is_qla80XX(ha)) {
+			ha->isp_ops->idc_lock(ha);
+			dev_state = qla4_8xxx_rd_direct(ha,
+							QLA8XXX_CRB_DEV_STATE);
+			ha->isp_ops->idc_unlock(ha);
+			if (dev_state == QLA8XXX_DEV_FAILED) {
+				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
+				    "initialize adapter. H/W is in failed state\n",
+				    __func__);
+				break;
+			}
+		}
+		DEBUG2(printk("scsi: %s: retrying adapter initialization "
+			      "(%d)\n", __func__, init_retry_count));
+
+		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
+			continue;
+
+		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
+		if (is_qla80XX(ha) && (status == QLA_ERROR)) {
+			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
+				goto skip_retry_init;
+		}
+	}
+
+skip_retry_init:
+	if (!test_bit(AF_ONLINE, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
+
+		if ((is_qla8022(ha) && ql4xdontresethba) ||
+		    ((is_qla8032(ha) || is_qla8042(ha)) &&
+		     qla4_83xx_idc_dontreset(ha))) {
+			/* Put the device in failed state. */
+			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
+			ha->isp_ops->idc_lock(ha);
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_FAILED);
+			ha->isp_ops->idc_unlock(ha);
+		}
+		ret = -ENODEV;
+		goto remove_host;
+	}
+
+	/* Startup the kernel thread for this host adapter. */
+	DEBUG2(printk("scsi: %s: Starting kernel thread for "
+		      "qla4xxx_dpc\n", __func__));
+	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
+	ha->dpc_thread = create_singlethread_workqueue(buf);
+	if (!ha->dpc_thread) {
+		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
+		ret = -ENODEV;
+		goto remove_host;
+	}
+	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
+
+	ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
+				      ha->host_no);
+	if (!ha->task_wq) {
+		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
+		ret = -ENODEV;
+		goto remove_host;
+	}
+
+	/*
+	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
+	 * (which is called indirectly by qla4xxx_initialize_adapter),
+	 * so that irqs will be registered after crbinit but before
+	 * mbx_intr_enable.
+	 */
+	if (is_qla40XX(ha)) {
+		ret = qla4xxx_request_irqs(ha);
+		if (ret) {
+			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
+			    "interrupt %d already in use.\n", pdev->irq);
+			goto remove_host;
+		}
+	}
+
+	pci_save_state(ha->pdev);
+	ha->isp_ops->enable_intrs(ha);
+
+	/* Start timer thread. */
+	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
+
+	set_bit(AF_INIT_DONE, &ha->flags);
+
+	qla4_8xxx_alloc_sysfs_attr(ha);
+
+	printk(KERN_INFO
+	       " QLogic iSCSI HBA Driver version: %s\n"
+	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
+	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
+	       ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
+	       ha->fw_info.fw_patch, ha->fw_info.fw_build);
+
+	/* Set the driver version */
+	if (is_qla80XX(ha))
+		qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
+
+	if (qla4xxx_setup_boot_info(ha))
+		ql4_printk(KERN_ERR, ha,
+			   "%s: No iSCSI boot target configured\n", __func__);
+
+	set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
+	/* Perform the build ddb list and login to each */
+	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+	qla4xxx_wait_login_resp_boot_tgt(ha);
+
+	qla4xxx_create_chap_list(ha);
+
+	qla4xxx_create_ifaces(ha);
+	return 0;
+
+remove_host:
+	scsi_remove_host(ha->host);
+
+probe_failed:
+	qla4xxx_free_adapter(ha);
+
+probe_failed_ioconfig:
+	pci_disable_pcie_error_reporting(pdev);
+	scsi_host_put(ha->host);
+
+probe_disable_device:
+	pci_disable_device(pdev);
+
+	return ret;
+}
+
+/**
+ * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
+ * @ha: pointer to adapter structure
+ *
+ * Mark the other ISP-4xxx port to indicate that the driver is being removed,
+ * so that the other port will not re-initialize while in the process of
+ * removing the ha due to driver unload or hba hotplug.
+ **/
+static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
+{
+	struct scsi_qla_host *other_ha = NULL;
+	struct pci_dev *other_pdev = NULL;
+	int fn = ISP4XXX_PCI_FN_2;
+
+	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
+	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
+		fn = ISP4XXX_PCI_FN_1;
+
+	other_pdev =
+		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+		fn));
+
+	/* Get other_ha if other_pdev is valid and enabled */
+	if (other_pdev) {
+		if (atomic_read(&other_pdev->enable_cnt)) {
+			other_ha = pci_get_drvdata(other_pdev);
+			if (other_ha) {
+				set_bit(AF_HA_REMOVAL, &other_ha->flags);
+				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
+				    "Prevent %s reinit\n", __func__,
+				    dev_name(&other_ha->pdev->dev)));
+			}
+		}
+		pci_dev_put(other_pdev);
+	}
+}
+
+static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
+		struct ddb_entry *ddb_entry)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	unsigned long wtime;
+	uint32_t ddb_state;
+	int options;
+	int status;
+
+	options = LOGOUT_OPTION_CLOSE_SESSION;
+	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+		goto clear_ddb;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		goto clear_ddb;
+	}
+
+	wtime = jiffies + (HZ * LOGOUT_TOV);
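+	/* Give the firmware up to LOGOUT_TOV to tear the session down
+	 * before the DDB entry is cleared.
+	 */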
+	do {
+		status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+						 fw_ddb_entry, fw_ddb_entry_dma,
+						 NULL, NULL, &ddb_state, NULL,
+						 NULL, NULL);
+		if (status == QLA_ERROR)
+			goto free_ddb;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto free_ddb;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while ((time_after(wtime, jiffies)));
+
+free_ddb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+			  fw_ddb_entry, fw_ddb_entry_dma);
+clear_ddb:
+	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+}
+
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+	struct ddb_entry *ddb_entry;
+	int idx;
+
+	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if ((ddb_entry != NULL) &&
+		    (ddb_entry->ddb_type == FLASH_DDB)) {
+
+			qla4xxx_destroy_ddb(ha, ddb_entry);
+			/*
+			 * The driver's reference count was decremented at
+			 * session setup so that driver unload is seamless
+			 * without actually destroying the session; take the
+			 * reference back before tearing the session down.
+			 */
+			try_module_get(qla4xxx_iscsi_transport.owner);
+			iscsi_destroy_endpoint(ddb_entry->conn->ep);
+			qla4xxx_free_ddb(ha, ddb_entry);
+			iscsi_session_teardown(ddb_entry->sess);
+		}
+	}
+}
+
+/**
+ * qla4xxx_remove_adapter - callback function to remove adapter.
+ * @pci_dev: PCI device pointer
+ **/
+static void qla4xxx_remove_adapter(struct pci_dev *pdev)
+{
+	struct scsi_qla_host *ha;
+
+	/*
+	 * If the PCI device is disabled then it means probe_adapter had
+	 * failed and resources already cleaned up on probe_adapter exit.
+	 */
+	if (!pci_is_enabled(pdev))
+		return;
+
+	ha = pci_get_drvdata(pdev);
+
+	if (is_qla40XX(ha))
+		qla4xxx_prevent_other_port_reinit(ha);
+
+	/* destroy iface from sysfs */
+	qla4xxx_destroy_ifaces(ha);
+
+	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
+		iscsi_boot_destroy_kset(ha->boot_kset);
+
+	qla4xxx_destroy_fw_ddb_session(ha);
+	qla4_8xxx_free_sysfs_attr(ha);
+
+	qla4xxx_sysfs_ddb_remove(ha);
+	scsi_remove_host(ha->host);
+
+	qla4xxx_free_adapter(ha);
+
+	scsi_host_put(ha->host);
+
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
+ * @ha: HA context
+ *
+ * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
+ * supported addressing method.
+ */
+static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
+{
+	int retval;
+
+	/* Update our PCI device dma_mask for full 64 bit mask */
+	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
+		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+			dev_dbg(&ha->pdev->dev,
+				"Failed to set 64 bit PCI consistent mask; "
+				"using 32 bit.\n");
+			retval = pci_set_consistent_dma_mask(ha->pdev,
+							     DMA_BIT_MASK(32));
+		}
+	} else {
+		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+	}
+}
+
+static int qla4xxx_slave_alloc(struct scsi_device *sdev)
+{
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb;
+	int queue_depth = QL4_DEF_QDEPTH;
+
+	cls_sess = starget_to_session(sdev->sdev_target);
+	sess = cls_sess->dd_data;
+	ddb = sess->dd_data;
+
+	sdev->hostdata = ddb;
+
+	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
+		queue_depth = ql4xmaxqdepth;
+
+	scsi_change_queue_depth(sdev, queue_depth);
+	return 0;
+}
+
+/**
+ * qla4xxx_del_from_active_array - returns an active srb
+ * @ha: Pointer to host adapter structure.
+ * @index: index into the active_array
+ *
+ * This routine removes and returns the srb at the specified index
+ **/
+struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
+    uint32_t index)
+{
+	struct srb *srb = NULL;
+	struct scsi_cmnd *cmd = NULL;
+
+	cmd = scsi_host_find_tag(ha->host, index);
+	if (!cmd)
+		return srb;
+
+	srb = (struct srb *)CMD_SP(cmd);
+	if (!srb)
+		return srb;
+
+	/* update counters */
+	if (srb->flags & SRB_DMA_VALID) {
+		ha->iocb_cnt -= srb->iocb_cnt;
+		if (srb->cmd)
+			srb->cmd->host_scribble =
+				(unsigned char *)(unsigned long) MAX_SRBS;
+	}
+	return srb;
+}
+
+/**
+ * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
+ * @ha: Pointer to host adapter structure.
+ * @cmd: Scsi Command to wait on.
+ *
+ * This routine waits for the command to be returned by the Firmware
+ * for some max time.
+ **/
+static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
+				      struct scsi_cmnd *cmd)
+{
+	int done = 0;
+	struct srb *rp;
+	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
+	int ret = SUCCESS;
+
+	/* Don't wait on the command if a PCI error is being handled
+	 * by the PCI AER driver
+	 */
+	if (unlikely(pci_channel_offline(ha->pdev)) ||
+	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
+		    ha->host_no, __func__);
+		return ret;
+	}
+
+	do {
+		/* Check whether the command has been returned to the OS */
+		rp = (struct srb *) CMD_SP(cmd);
+		if (rp == NULL) {
+			done++;
+			break;
+		}
+
+		msleep(2000);
+	} while (max_wait_time--);
+
+	return done;
+}
+
+/**
+ * qla4xxx_wait_for_hba_online - waits for HBA to come online
+ * @ha: Pointer to host adapter structure
+ **/
+static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
+{
+	unsigned long wait_online;
+
+	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
+	while (time_before(jiffies, wait_online)) {
+
+		if (adapter_up(ha))
+			return QLA_SUCCESS;
+
+		msleep(2000);
+	}
+
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
+ * @ha: pointer to HBA
+ * @stgt: pointer to SCSI target
+ * @sdev: pointer to SCSI device; may be NULL to wait on the whole target
+ *
+ * This function waits for all outstanding commands to the given target (or
+ * a single lun of it) to complete. It returns 0 if all pending commands are
+ * returned and 1 otherwise.
+ **/
+static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
+					struct scsi_target *stgt,
+					struct scsi_device *sdev)
+{
+	int cnt;
+	int status = 0;
+	struct scsi_cmnd *cmd;
+
+	/*
+	 * Waiting for all commands for the designated target or dev
+	 * in the active array
+	 */
+	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
+		cmd = scsi_host_find_tag(ha->host, cnt);
+		if (cmd && stgt == scsi_target(cmd->device) &&
+		    (!sdev || sdev == cmd->device)) {
+			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+				status++;
+				break;
+			}
+		}
+	}
+	return status;
+}
+
+/**
+ * qla4xxx_eh_abort - callback for abort task.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to abort the specified
+ * command.
+ **/
+static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+	unsigned int id = cmd->device->id;
+	uint64_t lun = cmd->device->lun;
+	unsigned long flags;
+	struct srb *srb = NULL;
+	int ret = SUCCESS;
+	int wait = 0;
+	int rval;
+
+	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
+		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
+
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	srb = (struct srb *) CMD_SP(cmd);
+	if (!srb) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
+			   ha->host_no, id, lun);
+		return SUCCESS;
+	}
+	kref_get(&srb->srb_ref);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
+		DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
+		    ha->host_no, id, lun));
+		ret = FAILED;
+	} else {
+		DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
+		    ha->host_no, id, lun));
+		wait = 1;
+	}
+
+	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
+
+	/* Wait for command to complete */
+	if (wait) {
+		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
+			DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
+			    ha->host_no, id, lun));
+			ret = FAILED;
+		}
+	}
+
+	ql4_printk(KERN_INFO, ha,
+	    "scsi%ld:%d:%llu: Abort command - %s\n",
+	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
+
+	return ret;
+}
+
+/**
+ * qla4xxx_eh_device_reset - callback for target reset.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to reset all luns on the
+ * specified target.
+ **/
+static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+	struct ddb_entry *ddb_entry = cmd->device->hostdata;
+	int ret = FAILED, stat;
+	int rval;
+
+	if (!ddb_entry)
+		return ret;
+
+	ret = iscsi_block_scsi_eh(cmd);
+	if (ret)
+		return ret;
+	ret = FAILED;
+
+	ql4_printk(KERN_INFO, ha,
+		   "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
+		   cmd->device->channel, cmd->device->id, cmd->device->lun);
+
+	DEBUG2(printk(KERN_INFO
+		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
+		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
+		      cmd, jiffies, cmd->request->timeout / HZ,
+		      ha->dpc_flags, cmd->result, cmd->allowed));
+
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	/* FIXME: wait for hba to go online */
+	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
+	if (stat != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
+		goto eh_dev_reset_done;
+	}
+
+	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
+					 cmd->device)) {
+		ql4_printk(KERN_INFO, ha,
+			   "DEVICE RESET FAILED - waiting for "
+			   "commands.\n");
+		goto eh_dev_reset_done;
+	}
+
+	/* Send marker. */
+	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+		MM_LUN_RESET) != QLA_SUCCESS)
+		goto eh_dev_reset_done;
+
+	ql4_printk(KERN_INFO, ha,
+		   "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
+		   ha->host_no, cmd->device->channel, cmd->device->id,
+		   cmd->device->lun);
+
+	ret = SUCCESS;
+
+eh_dev_reset_done:
+
+	return ret;
+}
+
+/**
+ * qla4xxx_eh_target_reset - callback for target reset.
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is called by the Linux OS to reset the target.
+ **/
+static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
+	struct ddb_entry *ddb_entry = cmd->device->hostdata;
+	int stat, ret;
+	int rval;
+
+	if (!ddb_entry)
+		return FAILED;
+
+	ret = iscsi_block_scsi_eh(cmd);
+	if (ret)
+		return ret;
+
+	starget_printk(KERN_INFO, scsi_target(cmd->device),
+		       "WARM TARGET RESET ISSUED.\n");
+
+	DEBUG2(printk(KERN_INFO
+		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
+		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
+		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
+		      ha->dpc_flags, cmd->result, cmd->allowed));
+
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	stat = qla4xxx_reset_target(ha, ddb_entry);
+	if (stat != QLA_SUCCESS) {
+		starget_printk(KERN_INFO, scsi_target(cmd->device),
+			       "WARM TARGET RESET FAILED.\n");
+		return FAILED;
+	}
+
+	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
+					 NULL)) {
+		starget_printk(KERN_INFO, scsi_target(cmd->device),
+			       "WARM TARGET DEVICE RESET FAILED - "
+			       "waiting for commands.\n");
+		return FAILED;
+	}
+
+	/* Send marker. */
+	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
+		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
+		starget_printk(KERN_INFO, scsi_target(cmd->device),
+			       "WARM TARGET DEVICE RESET FAILED - "
+			       "marker iocb failed.\n");
+		return FAILED;
+	}
+
+	starget_printk(KERN_INFO, scsi_target(cmd->device),
+		       "WARM TARGET RESET SUCCEEDED.\n");
+	return SUCCESS;
+}
+
+/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine determines whether the host reset was invoked from the EH
+ * path or from an application such as sg_reset
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+	if (shost->shost_state == SHOST_RECOVERY)
+		return 1;
+	return 0;
+}
+
+/**
+ * qla4xxx_eh_host_reset - kernel callback
+ * @cmd: Pointer to Linux's SCSI command structure
+ *
+ * This routine is invoked by the Linux kernel to perform fatal error
+ * recovery on the specified adapter.
+ **/
+static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
+{
+	int return_status = FAILED;
+	struct scsi_qla_host *ha;
+	int rval;
+
+	ha = to_qla_host(cmd->device->host);
+
+	rval = qla4xxx_isp_check_reg(ha);
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
+		qla4_83xx_set_idc_dontreset(ha);
+
+	/*
+	 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
+	 * protocol drivers, we should not set device_state to NEED_RESET
+	 */
+	if (ql4xdontresethba ||
+	    ((is_qla8032(ha) || is_qla8042(ha)) &&
+	     qla4_83xx_idc_dontreset(ha))) {
+		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
+		     ha->host_no, __func__));
+
+		/* Clear outstanding srb in queues */
+		if (qla4xxx_is_eh_active(cmd->device->host))
+			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
+		return FAILED;
+	}
+
+	ql4_printk(KERN_INFO, ha,
+		   "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
+		   cmd->device->channel, cmd->device->id, cmd->device->lun);
+
+	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
+		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
+			      "DEAD.\n", ha->host_no, cmd->device->channel,
+			      __func__));
+
+		return FAILED;
+	}
+
+	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+		if (is_qla80XX(ha))
+			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+		else
+			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+	}
+
+	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
+		return_status = SUCCESS;
+
+	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
+		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
+
+	return return_status;
+}
+
+static int qla4xxx_context_reset(struct scsi_qla_host *ha)
+{
+	uint32_t mbox_cmd[MBOX_REG_COUNT];
+	uint32_t mbox_sts[MBOX_REG_COUNT];
+	struct addr_ctrl_blk_def *acb = NULL;
+	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
+	int rval = QLA_SUCCESS;
+	dma_addr_t acb_dma;
+
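+	/* A context reset re-applies the primary ACB: fetch the current
+	 * address control block, disable it, then set it again so the
+	 * firmware rebuilds its IP/iSCSI context.
+	 */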
+	acb = dma_alloc_coherent(&ha->pdev->dev,
+				 sizeof(struct addr_ctrl_blk_def),
+				 &acb_dma, GFP_KERNEL);
+	if (!acb) {
+		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+			   __func__);
+		rval = -ENOMEM;
+		goto exit_port_reset;
+	}
+
+	memset(acb, 0, acb_len);
+
+	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
+	if (rval != QLA_SUCCESS) {
+		rval = -EIO;
+		goto exit_free_acb;
+	}
+
+	rval = qla4xxx_disable_acb(ha);
+	if (rval != QLA_SUCCESS) {
+		rval = -EIO;
+		goto exit_free_acb;
+	}
+
+	wait_for_completion_timeout(&ha->disable_acb_comp,
+				    DISABLE_ACB_TOV * HZ);
+
+	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+	if (rval != QLA_SUCCESS) {
+		rval = -EIO;
+		goto exit_free_acb;
+	}
+
+exit_free_acb:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
+			  acb, acb_dma);
+exit_port_reset:
+	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
+			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+	return rval;
+}
+
+static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	int rval = QLA_SUCCESS;
+	uint32_t idc_ctrl;
+
+	if (ql4xdontresethba) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
+				  __func__));
+		rval = -EPERM;
+		goto exit_host_reset;
+	}
+
+	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+		goto recover_adapter;
+
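+	/* SCSI_ADAPTER_RESET triggers a full HBA reset; SCSI_FIRMWARE_RESET
+	 * resets only the firmware context (done inline for ISP4xxx,
+	 * flagged for the recovery path on ISP8xxx).
+	 */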
+	switch (reset_type) {
+	case SCSI_ADAPTER_RESET:
+		set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		break;
+	case SCSI_FIRMWARE_RESET:
+		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+			if (is_qla80XX(ha))
+				/* set firmware context reset */
+				set_bit(DPC_RESET_HA_FW_CONTEXT,
+					&ha->dpc_flags);
+			else {
+				rval = qla4xxx_context_reset(ha);
+				goto exit_host_reset;
+			}
+		}
+		break;
+	}
+
+recover_adapter:
+	/* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
+	 * reset is issued by application */
+	if ((is_qla8032(ha) || is_qla8042(ha)) &&
+	    test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+				 (idc_ctrl | GRACEFUL_RESET_BIT1));
+	}
+
+	rval = qla4xxx_recover_adapter(ha);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
+				  __func__));
+		rval = -EIO;
+	}
+
+exit_host_reset:
+	return rval;
+}
+
+/* PCI AER driver recovers from all correctable errors w/o
+ * driver intervention. For uncorrectable errors PCI AER
+ * driver calls the following device driver's callbacks
+ *
+ * - Fatal Errors - link_reset
+ * - Non-Fatal Errors - driver's error_detected() which
+ * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
+ *
+ * PCI AER driver calls
+ * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled()
+ *               returns RECOVERED or NEED_RESET if fw_hung
+ * NEED_RESET - driver's slot_reset()
+ * DISCONNECT - device is dead & cannot recover
+ * RECOVERED - driver's resume()
+ */
+static pci_ers_result_t
+qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+
+	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
+	    ha->host_no, __func__, state);
+
+	if (!is_aer_supported(ha))
+		return PCI_ERS_RESULT_NONE;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		clear_bit(AF_EEH_BUSY, &ha->flags);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		set_bit(AF_EEH_BUSY, &ha->flags);
+		qla4xxx_mailbox_premature_completion(ha);
+		qla4xxx_free_irqs(ha);
+		pci_disable_device(pdev);
+		/* Return back all IOs */
+		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		set_bit(AF_EEH_BUSY, &ha->flags);
+		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
+		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * qla4xxx_pci_mmio_enabled - recovery callback for PCI_ERS_RESULT_CAN_RECOVER
+ * @pdev: PCI device pointer
+ *
+ * This gets called if qla4xxx_pci_error_detected() returns
+ * PCI_ERS_RESULT_CAN_RECOVER and read/write to the device still works.
+ **/
+static pci_ers_result_t
+qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+
+	if (!is_aer_supported(ha))
+		return PCI_ERS_RESULT_NONE;
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
+{
+	uint32_t rval = QLA_ERROR;
+	int fn;
+	struct pci_dev *other_pdev = NULL;
+
+	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
+
+	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+
+	if (test_bit(AF_ONLINE, &ha->flags)) {
+		clear_bit(AF_ONLINE, &ha->flags);
+		clear_bit(AF_LINK_UP, &ha->flags);
+		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
+		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
+	}
+
+	fn = PCI_FUNC(ha->pdev->devfn);
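+	/* For ISP8022, the lowest-numbered enabled PCI function owns the
+	 * recovery: if an enabled lower function exists, this one only
+	 * resets its firmware context. For ISP83xx/84xx the IDC registers
+	 * decide whether this function can perform the reset.
+	 */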
+	if (is_qla8022(ha)) {
+		while (fn > 0) {
+			fn--;
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
+				   ha->host_no, __func__, fn);
+			/* Get the pci device given the domain, bus,
+			 * slot/function number */
+			other_pdev = pci_get_domain_bus_and_slot(
+					   pci_domain_nr(ha->pdev->bus),
+					   ha->pdev->bus->number,
+					   PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+					   fn));
+
+			if (!other_pdev)
+				continue;
+
+			if (atomic_read(&other_pdev->enable_cnt)) {
+				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
+					   ha->host_no, __func__, fn);
+				pci_dev_put(other_pdev);
+				break;
+			}
+			pci_dev_put(other_pdev);
+		}
+	} else {
+		/* this case is meant for ISP83xx/ISP84xx only */
+		if (qla4_83xx_can_perform_reset(ha)) {
+			/* reset fn as iSCSI is going to perform the reset */
+			fn = 0;
+		}
+	}
+
+	/* The first function on the card, the reset owner will
+	 * start & initialize the firmware. The other functions
+	 * on the card will reset the firmware context
+	 */
+	if (!fn) {
+		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
+		    "0x%x is the owner\n", ha->host_no, __func__,
+		    ha->pdev->devfn);
+
+		ha->isp_ops->idc_lock(ha);
+		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+				    QLA8XXX_DEV_COLD);
+		ha->isp_ops->idc_unlock(ha);
+
+		rval = qla4_8xxx_update_idc_reg(ha);
+		if (rval == QLA_ERROR) {
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
+				   ha->host_no, __func__);
+			ha->isp_ops->idc_lock(ha);
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_FAILED);
+			ha->isp_ops->idc_unlock(ha);
+			goto exit_error_recovery;
+		}
+
+		clear_bit(AF_FW_RECOVERY, &ha->flags);
+		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+
+		if (rval != QLA_SUCCESS) {
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
+			    "FAILED\n", ha->host_no, __func__);
+			qla4xxx_free_irqs(ha);
+			ha->isp_ops->idc_lock(ha);
+			qla4_8xxx_clear_drv_active(ha);
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_FAILED);
+			ha->isp_ops->idc_unlock(ha);
+		} else {
+			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
+			    "READY\n", ha->host_no, __func__);
+			ha->isp_ops->idc_lock(ha);
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
+					    QLA8XXX_DEV_READY);
+			/* Clear driver state register */
+			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
+			qla4_8xxx_set_drv_active(ha);
+			ha->isp_ops->idc_unlock(ha);
+			ha->isp_ops->enable_intrs(ha);
+		}
+	} else {
+		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
+		    "the reset owner\n", ha->host_no, __func__,
+		    ha->pdev->devfn);
+		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
+		     QLA8XXX_DEV_READY)) {
+			clear_bit(AF_FW_RECOVERY, &ha->flags);
+			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
+			if (rval == QLA_SUCCESS)
+				ha->isp_ops->enable_intrs(ha);
+			else
+				qla4xxx_free_irqs(ha);
+
+			ha->isp_ops->idc_lock(ha);
+			qla4_8xxx_set_drv_active(ha);
+			ha->isp_ops->idc_unlock(ha);
+		}
+	}
+exit_error_recovery:
+	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
+	return rval;
+}
+
+static pci_ers_result_t
+qla4xxx_pci_slot_reset(struct pci_dev *pdev)
+{
+	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
+	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+	int rc;
+
+	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
+	    ha->host_no, __func__);
+
+	if (!is_aer_supported(ha))
+		return PCI_ERS_RESULT_NONE;
+
+	/* Restore the saved state of PCIe device -
+	 * BAR registers, PCI Config space, PCIX, MSI,
+	 * IOV states
+	 */
+	pci_restore_state(pdev);
+
+	/* pci_restore_state() clears the device's saved_state flag, so
+	 * save the state again so that it can be restored after the
+	 * next reset
+	 */
+	pci_save_state(pdev);
+
+	/* Initialize device or resume if in suspended state */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
+		    "device after reset\n", ha->host_no, __func__);
+		goto exit_slot_reset;
+	}
+
+	ha->isp_ops->disable_intrs(ha);
+
+	if (is_qla80XX(ha) &&
+	    (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS))
+		ret = PCI_ERS_RESULT_RECOVERED;
+
+exit_slot_reset:
+	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
+	    "device after reset\n", ha->host_no, __func__, ret);
+	return ret;
+}
+
+static void
+qla4xxx_pci_resume(struct pci_dev *pdev)
+{
+	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
+	int ret;
+
+	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
+	    ha->host_no, __func__);
+
+	ret = qla4xxx_wait_for_hba_online(ha);
+	if (ret != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
+		    "resume I/O from slot/link_reset\n", ha->host_no,
+		     __func__);
+	}
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	clear_bit(AF_EEH_BUSY, &ha->flags);
+}
+
+static const struct pci_error_handlers qla4xxx_err_handler = {
+	.error_detected = qla4xxx_pci_error_detected,
+	.mmio_enabled = qla4xxx_pci_mmio_enabled,
+	.slot_reset = qla4xxx_pci_slot_reset,
+	.resume = qla4xxx_pci_resume,
+};
+
+static struct pci_device_id qla4xxx_pci_tbl[] = {
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.vendor         = PCI_VENDOR_ID_QLOGIC,
+		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
+		.subvendor      = PCI_ANY_ID,
+		.subdevice      = PCI_ANY_ID,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP8324,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_QLOGIC,
+		.device		= PCI_DEVICE_ID_QLOGIC_ISP8042,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+	},
+	{0, 0},
+};
+MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
+
+static struct pci_driver qla4xxx_pci_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= qla4xxx_pci_tbl,
+	.probe		= qla4xxx_probe_adapter,
+	.remove		= qla4xxx_remove_adapter,
+	.err_handler = &qla4xxx_err_handler,
+};
+
+static int __init qla4xxx_module_init(void)
+{
+	int ret;
+
+	if (ql4xqfulltracking)
+		qla4xxx_driver_template.track_queue_depth = 1;
+
+	/* Allocate cache for SRBs. */
+	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
+				       SLAB_HWCACHE_ALIGN, NULL);
+	if (srb_cachep == NULL) {
+		printk(KERN_ERR
+		       "%s: Unable to allocate SRB cache..."
+		       "Failing load!\n", DRIVER_NAME);
+		ret = -ENOMEM;
+		goto no_srp_cache;
+	}
+
+	/* Derive version string. */
+	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
+	if (ql4xextended_error_logging)
+		strcat(qla4xxx_version_str, "-debug");
+
+	qla4xxx_scsi_transport =
+		iscsi_register_transport(&qla4xxx_iscsi_transport);
+	if (!qla4xxx_scsi_transport) {
+		ret = -ENODEV;
+		goto release_srb_cache;
+	}
+
+	ret = pci_register_driver(&qla4xxx_pci_driver);
+	if (ret)
+		goto unregister_transport;
+
+	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
+	return 0;
+
+unregister_transport:
+	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
+release_srb_cache:
+	kmem_cache_destroy(srb_cachep);
+no_srp_cache:
+	return ret;
+}
+
+static void __exit qla4xxx_module_exit(void)
+{
+	pci_unregister_driver(&qla4xxx_pci_driver);
+	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
+	kmem_cache_destroy(srb_cachep);
+}
+
+module_init(qla4xxx_module_init);
+module_exit(qla4xxx_module_exit);
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_version.h b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_version.h
new file mode 100644
index 0000000..f11eaa7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla4xxx/ql4_version.h
@@ -0,0 +1,8 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#define QLA4XXX_DRIVER_VERSION	"5.04.00-k6"