[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/Kconfig b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/Kconfig
new file mode 100644
index 0000000..036cc3f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/Kconfig
@@ -0,0 +1,49 @@
+config SCSI_QLA_FC
+	tristate "QLogic QLA2XXX Fibre Channel Support"
+	depends on PCI && SCSI
+	depends on SCSI_FC_ATTRS
+	depends on NVME_FC || !NVME_FC
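+	# The "FOO || !FOO" idiom above keeps this driver from being built
+	# into the kernel while NVME_FC is a module: if NVME_FC=m, this
+	# symbol is limited to =m as well.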
+	select FW_LOADER
+	select BTREE
+	---help---
+	This qla2xxx driver supports all QLogic Fibre Channel
+	PCI and PCIe host adapters.
+
+	By default, firmware for the ISP parts will be loaded
+	via the Firmware Loader interface.
+
+	ISP               Firmware Filename
+	----------        -----------------
+	21xx              ql2100_fw.bin
+	22xx              ql2200_fw.bin
+	2300, 2312, 6312  ql2300_fw.bin
+	2322, 6322        ql2322_fw.bin
+	24xx, 54xx        ql2400_fw.bin
+	25xx              ql2500_fw.bin
+
+	Upon request, the driver caches the firmware image until
+	the driver is unloaded.
+
+	Firmware images can be retrieved from:
+
+		http://ldriver.qlogic.com/firmware/
+
+	They are also included in the linux-firmware tree.
+
+config TCM_QLA2XXX
+	tristate "TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs"
+	depends on SCSI_QLA_FC && TARGET_CORE
+	depends on LIBFC
+	select BTREE
+	default n
+	---help---
+	Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs.
+
+if TCM_QLA2XXX
+config TCM_QLA2XXX_DEBUG
+	bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
+	default n
+	---help---
+	Say Y here to enable the TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs.
+	This will include code to enable the SCSI command jammer.
+endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/Makefile b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/Makefile
new file mode 100644
index 0000000..17d5bc1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
+		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+
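+# With CONFIG_SCSI_QLA_FC=m the objects above are linked into qla2xxx.ko;
+# tcm_qla2xxx.ko is built only when CONFIG_TCM_QLA2XXX is also enabled.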
+obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_attr.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_attr.c
new file mode 100644
index 0000000..6562532
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_attr.c
@@ -0,0 +1,2327 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+static int qla24xx_vport_disable(struct fc_vport *, bool);
+
+/* SYSFS attributes --------------------------------------------------------- */
+
+static ssize_t
+qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *bin_attr,
+			   char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+
+	if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+		return 0;
+
+	if (IS_P3P_TYPE(ha)) {
+		if (off < ha->md_template_size) {
+			rval = memory_read_from_buffer(buf, count,
+			    &off, ha->md_tmplt_hdr, ha->md_template_size);
+			return rval;
+		}
+		off -= ha->md_template_size;
+		rval = memory_read_from_buffer(buf, count,
+		    &off, ha->md_dump, ha->md_dump_size);
+		return rval;
+	} else if (ha->mctp_dumped && ha->mctp_dump_reading)
+		return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
+		    MCTP_DUMP_SIZE);
+	else if (ha->fw_dump_reading)
+		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+					ha->fw_dump_len);
+	else
+		return 0;
+}
+
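+/*
+ * Control codes accepted by the "fw_dump" attribute, written as decimal
+ * strings (see the switch below):
+ *
+ *	0 - clear a previously captured firmware dump
+ *	1 - mark a captured dump readable through this attribute
+ *	2 - (re)allocate the firmware dump buffer
+ *	3 - force a chip/system error so that a dump is taken
+ *	4 - report whether MiniDump is supported (P3P adapters)
+ *	5 - request an ISP abort (P3P adapters)
+ *	6 - clear the MCTP dump
+ *	7 - mark a captured MCTP dump readable
+ *
+ * Sketch of typical use from user space (path shown is the usual sysfs
+ * location for these host attributes; adjust as needed):
+ *
+ *	echo 1 > /sys/class/scsi_host/hostN/device/fw_dump
+ *	cat /sys/class/scsi_host/hostN/device/fw_dump > fw_dump.bin
+ *	echo 0 > /sys/class/scsi_host/hostN/device/fw_dump
+ */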
+static ssize_t
+qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
+			    struct bin_attribute *bin_attr,
+			    char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	int reading;
+
+	if (off != 0)
+		return 0;
+
+	reading = simple_strtol(buf, NULL, 10);
+	switch (reading) {
+	case 0:
+		if (!ha->fw_dump_reading)
+			break;
+
+		ql_log(ql_log_info, vha, 0x705d,
+		    "Firmware dump cleared on (%ld).\n", vha->host_no);
+
+		if (IS_P3P_TYPE(ha)) {
+			qla82xx_md_free(vha);
+			qla82xx_md_prep(vha);
+		}
+		ha->fw_dump_reading = 0;
+		ha->fw_dumped = 0;
+		break;
+	case 1:
+		if (ha->fw_dumped && !ha->fw_dump_reading) {
+			ha->fw_dump_reading = 1;
+
+			ql_log(ql_log_info, vha, 0x705e,
+			    "Raw firmware dump ready for read on (%ld).\n",
+			    vha->host_no);
+		}
+		break;
+	case 2:
+		qla2x00_alloc_fw_dump(vha);
+		break;
+	case 3:
+		if (IS_QLA82XX(ha)) {
+			qla82xx_idc_lock(ha);
+			qla82xx_set_reset_owner(vha);
+			qla82xx_idc_unlock(ha);
+		} else if (IS_QLA8044(ha)) {
+			qla8044_idc_lock(ha);
+			qla82xx_set_reset_owner(vha);
+			qla8044_idc_unlock(ha);
+		} else
+			qla2x00_system_error(vha);
+		break;
+	case 4:
+		if (IS_P3P_TYPE(ha)) {
+			if (ha->md_tmplt_hdr)
+				ql_dbg(ql_dbg_user, vha, 0x705b,
+				    "MiniDump supported with this firmware.\n");
+			else
+				ql_dbg(ql_dbg_user, vha, 0x709d,
+				    "MiniDump not supported with this firmware.\n");
+		}
+		break;
+	case 5:
+		if (IS_P3P_TYPE(ha))
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+	case 6:
+		if (!ha->mctp_dump_reading)
+			break;
+		ql_log(ql_log_info, vha, 0x70c1,
+		    "MCTP dump cleared on (%ld).\n", vha->host_no);
+		ha->mctp_dump_reading = 0;
+		ha->mctp_dumped = 0;
+		break;
+	case 7:
+		if (ha->mctp_dumped && !ha->mctp_dump_reading) {
+			ha->mctp_dump_reading = 1;
+			ql_log(ql_log_info, vha, 0x70c2,
+			    "Raw mctp dump ready for read on (%ld).\n",
+			    vha->host_no);
+		}
+		break;
+	}
+	return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+	.attr = {
+		.name = "fw_dump",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_fw_dump,
+	.write = qla2x00_sysfs_write_fw_dump,
+};
+
+static ssize_t
+qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
+			 struct bin_attribute *bin_attr,
+			 char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return 0;
+
+	if (IS_NOCACHE_VPD_TYPE(ha))
+		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+		    ha->nvram_size);
+	return memory_read_from_buffer(buf, count, &off, ha->nvram,
+					ha->nvram_size);
+}
+
+static ssize_t
+qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
+			  struct bin_attribute *bin_attr,
+			  char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t	cnt;
+
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
+	    !ha->isp_ops->write_nvram)
+		return -EINVAL;
+
+	/* Checksum NVRAM. */
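+	/*
+	 * The trailing word (or byte) is rewritten with the two's
+	 * complement of the running sum, so the image as a whole
+	 * checksums to zero.
+	 */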
+	if (IS_FWI2_CAPABLE(ha)) {
+		uint32_t *iter;
+		uint32_t chksum;
+
+		iter = (uint32_t *)buf;
+		chksum = 0;
+		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
+			chksum += le32_to_cpu(*iter);
+		chksum = ~chksum + 1;
+		*iter = cpu_to_le32(chksum);
+	} else {
+		uint8_t *iter;
+		uint8_t chksum;
+
+		iter = (uint8_t *)buf;
+		chksum = 0;
+		for (cnt = 0; cnt < count - 1; cnt++)
+			chksum += *iter++;
+		chksum = ~chksum + 1;
+		*iter = chksum;
+	}
+
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x705f,
+		    "HBA not online, failing NVRAM update.\n");
+		return -EAGAIN;
+	}
+
+	/* Write NVRAM. */
+	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
+	    count);
+
+	ql_dbg(ql_dbg_user, vha, 0x7060,
+	    "Setting ISP_ABORT_NEEDED\n");
+	/* NVRAM settings take effect immediately. */
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_chip_reset(vha);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_nvram_attr = {
+	.attr = {
+		.name = "nvram",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 512,
+	.read = qla2x00_sysfs_read_nvram,
+	.write = qla2x00_sysfs_write_nvram,
+};
+
+static ssize_t
+qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
+			  struct bin_attribute *bin_attr,
+			  char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	ssize_t rval = 0;
+
+	mutex_lock(&ha->optrom_mutex);
+
+	if (ha->optrom_state != QLA_SREADING)
+		goto out;
+
+	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+	    ha->optrom_region_size);
+
+out:
+	mutex_unlock(&ha->optrom_mutex);
+
+	return rval;
+}
+
+static ssize_t
+qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
+			   struct bin_attribute *bin_attr,
+			   char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+
+	mutex_lock(&ha->optrom_mutex);
+
+	if (ha->optrom_state != QLA_SWRITING) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -EINVAL;
+	}
+	if (off > ha->optrom_region_size) {
+		mutex_unlock(&ha->optrom_mutex);
+		return -ERANGE;
+	}
+	if (off + count > ha->optrom_region_size)
+		count = ha->optrom_region_size - off;
+
+	memcpy(&ha->optrom_buffer[off], buf, count);
+	mutex_unlock(&ha->optrom_mutex);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_optrom_attr = {
+	.attr = {
+		.name = "optrom",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_optrom,
+	.write = qla2x00_sysfs_write_optrom,
+};
+
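+/*
+ * The "optrom_ctl" attribute drives a small state machine around the
+ * "optrom" data attribute. Writes take the form "<op>[:<start>:<size>]",
+ * with <start>/<size> in hex (see the sscanf and switch below):
+ *
+ *	1:start:size - read flash into the staging buffer; fetch via "optrom"
+ *	2:start:size - stage a flash update; fill "optrom" with the image
+ *	3            - burn the staged image to flash
+ *	0            - free the staging buffer
+ *
+ * Illustrative read sequence (offsets are examples only):
+ *
+ *	echo "1:0:20000" > optrom_ctl
+ *	cat optrom > boot.bin
+ *	echo "0" > optrom_ctl
+ */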
+static ssize_t
+qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+			       struct bin_attribute *bin_attr,
+			       char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t start = 0;
+	uint32_t size = ha->optrom_size;
+	int val, valid;
+	ssize_t rval = count;
+
+	if (off)
+		return -EINVAL;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return -EAGAIN;
+
+	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
+		return -EINVAL;
+	if (start > ha->optrom_size)
+		return -EINVAL;
+	if (size > ha->optrom_size - start)
+		size = ha->optrom_size - start;
+
+	mutex_lock(&ha->optrom_mutex);
+	switch (val) {
+	case 0:
+		if (ha->optrom_state != QLA_SREADING &&
+		    ha->optrom_state != QLA_SWRITING) {
+			rval = -EINVAL;
+			goto out;
+		}
+		ha->optrom_state = QLA_SWAITING;
+
+		ql_dbg(ql_dbg_user, vha, 0x7061,
+		    "Freeing flash region allocation -- 0x%x bytes.\n",
+		    ha->optrom_region_size);
+
+		vfree(ha->optrom_buffer);
+		ha->optrom_buffer = NULL;
+		break;
+	case 1:
+		if (ha->optrom_state != QLA_SWAITING) {
+			rval = -EINVAL;
+			goto out;
+		}
+
+		ha->optrom_region_start = start;
+		ha->optrom_region_size = size;
+
+		ha->optrom_state = QLA_SREADING;
+		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+		if (ha->optrom_buffer == NULL) {
+			ql_log(ql_log_warn, vha, 0x7062,
+			    "Unable to allocate memory for optrom retrieval "
+			    "(%x).\n", ha->optrom_region_size);
+
+			ha->optrom_state = QLA_SWAITING;
+			rval = -ENOMEM;
+			goto out;
+		}
+
+		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x7063,
+			    "HBA not online, failing NVRAM update.\n");
+			rval = -EAGAIN;
+			goto out;
+		}
+
+		ql_dbg(ql_dbg_user, vha, 0x7064,
+		    "Reading flash region -- 0x%x/0x%x.\n",
+		    ha->optrom_region_start, ha->optrom_region_size);
+
+		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+		    ha->optrom_region_start, ha->optrom_region_size);
+		break;
+	case 2:
+		if (ha->optrom_state != QLA_SWAITING) {
+			rval = -EINVAL;
+			goto out;
+		}
+
+		/*
+		 * We need to be more restrictive on which FLASH regions are
+		 * allowed to be updated via user-space.  Regions accessible
+		 * via this method include:
+		 *
+		 * ISP21xx/ISP22xx/ISP23xx type boards:
+		 *
+		 * 	0x000000 -> 0x020000 -- Boot code.
+		 *
+		 * ISP2322/ISP24xx type boards:
+		 *
+		 * 	0x000000 -> 0x07ffff -- Boot code.
+		 * 	0x080000 -> 0x0fffff -- Firmware.
+		 *
+		 * ISP25xx type boards:
+		 *
+		 * 	0x000000 -> 0x07ffff -- Boot code.
+		 * 	0x080000 -> 0x0fffff -- Firmware.
+		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
+		 */
+		valid = 0;
+		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+			valid = 1;
+		else if (start == (ha->flt_region_boot * 4) ||
+		    start == (ha->flt_region_fw * 4))
+			valid = 1;
+		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
+			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
+			|| IS_QLA27XX(ha))
+			valid = 1;
+		if (!valid) {
+			ql_log(ql_log_warn, vha, 0x7065,
+			    "Invalid start region 0x%x/0x%x.\n", start, size);
+			rval = -EINVAL;
+			goto out;
+		}
+
+		ha->optrom_region_start = start;
+		ha->optrom_region_size = size;
+
+		ha->optrom_state = QLA_SWRITING;
+		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+		if (ha->optrom_buffer == NULL) {
+			ql_log(ql_log_warn, vha, 0x7066,
+			    "Unable to allocate memory for optrom update "
+			    "(%x)\n", ha->optrom_region_size);
+
+			ha->optrom_state = QLA_SWAITING;
+			rval = -ENOMEM;
+			goto out;
+		}
+
+		ql_dbg(ql_dbg_user, vha, 0x7067,
+		    "Staging flash region write -- 0x%x/0x%x.\n",
+		    ha->optrom_region_start, ha->optrom_region_size);
+
+		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+		break;
+	case 3:
+		if (ha->optrom_state != QLA_SWRITING) {
+			rval = -EINVAL;
+			goto out;
+		}
+
+		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x7068,
+			    "HBA not online, failing flash update.\n");
+			rval = -EAGAIN;
+			goto out;
+		}
+
+		ql_dbg(ql_dbg_user, vha, 0x7069,
+		    "Writing flash region -- 0x%x/0x%x.\n",
+		    ha->optrom_region_start, ha->optrom_region_size);
+
+		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+		    ha->optrom_region_start, ha->optrom_region_size);
+		break;
+	default:
+		rval = -EINVAL;
+	}
+
+out:
+	mutex_unlock(&ha->optrom_mutex);
+	return rval;
+}
+
+static struct bin_attribute sysfs_optrom_ctl_attr = {
+	.attr = {
+		.name = "optrom_ctl",
+		.mode = S_IWUSR,
+	},
+	.size = 0,
+	.write = qla2x00_sysfs_write_optrom_ctl,
+};
+
+static ssize_t
+qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
+		       struct bin_attribute *bin_attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t faddr;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return -EAGAIN;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EINVAL;
+
+	if (IS_NOCACHE_VPD_TYPE(ha)) {
+		faddr = ha->flt_region_vpd << 2;
+
+		if (IS_QLA27XX(ha) &&
+		    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+			faddr = ha->flt_region_vpd_sec << 2;
+
+		ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
+		    ha->vpd_size);
+	}
+	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
+}
+
+static ssize_t
+qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
+			struct bin_attribute *bin_attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t *tmp_data;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return 0;
+
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
+	    !ha->isp_ops->write_nvram)
+		return 0;
+
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x706a,
+		    "HBA not online, failing VPD update.\n");
+		return -EAGAIN;
+	}
+
+	/* Write NVRAM. */
+	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+
+	/* Update flash version information for 4Gb & above. */
+	if (!IS_FWI2_CAPABLE(ha))
+		return -EINVAL;
+
+	tmp_data = vmalloc(256);
+	if (!tmp_data) {
+		ql_log(ql_log_warn, vha, 0x706b,
+		    "Unable to allocate memory for VPD information update.\n");
+		return -ENOMEM;
+	}
+	ha->isp_ops->get_flash_version(vha, tmp_data);
+	vfree(tmp_data);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_vpd_attr = {
+	.attr = {
+		.name = "vpd",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_vpd,
+	.write = qla2x00_sysfs_write_vpd,
+};
+
+static ssize_t
+qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
+		       struct bin_attribute *bin_attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	int rval;
+
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
+		return 0;
+
+	if (qla2x00_reset_active(vha))
+		return 0;
+
+	rval = qla2x00_read_sfp_dev(vha, buf, count);
+	if (rval)
+		return -EIO;
+
+	return count;
+}
+
+static struct bin_attribute sysfs_sfp_attr = {
+	.attr = {
+		.name = "sfp",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = SFP_DEV_SIZE,
+	.read = qla2x00_sysfs_read_sfp,
+};
+
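+/*
+ * Reset control codes accepted by the "reset" attribute, written as
+ * decimal strings (e.g. "131676" selects 0x2025c):
+ *
+ *	0x2025c - full ISP reset
+ *	0x2025d - MPI reset (ISP81xx/ISP83xx only)
+ *	0x2025e - FCoE context reset (P3P adapters, base port only)
+ *	0x2025f - disable reset via IDC control (ISP8031 only)
+ *	0x20260 - re-enable reset via IDC control (ISP8031 only)
+ *	0x20261 - refresh cached flash versions without a reset
+ */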
+static ssize_t
+qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
+			struct bin_attribute *bin_attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	int type;
+	uint32_t idc_control;
+	uint8_t *tmp_data = NULL;
+
+	if (off != 0)
+		return -EINVAL;
+
+	type = simple_strtol(buf, NULL, 10);
+	switch (type) {
+	case 0x2025c:
+		ql_log(ql_log_info, vha, 0x706e,
+		    "Issuing ISP reset.\n");
+
+		scsi_block_requests(vha->host);
+		if (IS_QLA82XX(ha)) {
+			ha->flags.isp82xx_no_md_cap = 1;
+			qla82xx_idc_lock(ha);
+			qla82xx_set_reset_owner(vha);
+			qla82xx_idc_unlock(ha);
+		} else if (IS_QLA8044(ha)) {
+			qla8044_idc_lock(ha);
+			idc_control = qla8044_rd_reg(ha,
+			    QLA8044_IDC_DRV_CTRL);
+			qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+			    (idc_control | GRACEFUL_RESET_BIT1));
+			qla82xx_set_reset_owner(vha);
+			qla8044_idc_unlock(ha);
+		} else {
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		qla2x00_wait_for_chip_reset(vha);
+		scsi_unblock_requests(vha->host);
+		break;
+	case 0x2025d:
+		if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+			return -EPERM;
+
+		ql_log(ql_log_info, vha, 0x706f,
+		    "Issuing MPI reset.\n");
+
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+			uint32_t idc_control;
+
+			qla83xx_idc_lock(vha, 0);
+			__qla83xx_get_idc_control(vha, &idc_control);
+			idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
+			__qla83xx_set_idc_control(vha, idc_control);
+			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+			    QLA8XXX_DEV_NEED_RESET);
+			qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+			qla83xx_idc_unlock(vha, 0);
+			break;
+		} else {
+			/* Make sure FC side is not in reset */
+			WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
+				     QLA_SUCCESS);
+
+			/* Issue MPI reset */
+			scsi_block_requests(vha->host);
+			if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
+				ql_log(ql_log_warn, vha, 0x7070,
+				    "MPI reset failed.\n");
+			scsi_unblock_requests(vha->host);
+			break;
+		}
+	case 0x2025e:
+		if (!IS_P3P_TYPE(ha) || vha != base_vha) {
+			ql_log(ql_log_info, vha, 0x7071,
+			    "FCoE ctx reset not supported.\n");
+			return -EPERM;
+		}
+
+		ql_log(ql_log_info, vha, 0x7072,
+		    "Issuing FCoE ctx reset.\n");
+		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		qla2x00_wait_for_fcoe_ctx_reset(vha);
+		break;
+	case 0x2025f:
+		if (!IS_QLA8031(ha))
+			return -EPERM;
+		ql_log(ql_log_info, vha, 0x70bc,
+		    "Disabling Reset by IDC control\n");
+		qla83xx_idc_lock(vha, 0);
+		__qla83xx_get_idc_control(vha, &idc_control);
+		idc_control |= QLA83XX_IDC_RESET_DISABLED;
+		__qla83xx_set_idc_control(vha, idc_control);
+		qla83xx_idc_unlock(vha, 0);
+		break;
+	case 0x20260:
+		if (!IS_QLA8031(ha))
+			return -EPERM;
+		ql_log(ql_log_info, vha, 0x70bd,
+		    "Enabling Reset by IDC control\n");
+		qla83xx_idc_lock(vha, 0);
+		__qla83xx_get_idc_control(vha, &idc_control);
+		idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
+		__qla83xx_set_idc_control(vha, idc_control);
+		qla83xx_idc_unlock(vha, 0);
+		break;
+	case 0x20261:
+		ql_dbg(ql_dbg_user, vha, 0x70e0,
+		    "Updating cache versions without reset ");
+
+		tmp_data = vmalloc(256);
+		if (!tmp_data) {
+			ql_log(ql_log_warn, vha, 0x70e1,
+			    "Unable to allocate memory for VPD information update.\n");
+			return -ENOMEM;
+		}
+		ha->isp_ops->get_flash_version(vha, tmp_data);
+		vfree(tmp_data);
+		break;
+	}
+	return count;
+}
+
+static struct bin_attribute sysfs_reset_attr = {
+	.attr = {
+		.name = "reset",
+		.mode = S_IWUSR,
+	},
+	.size = 0,
+	.write = qla2x00_sysfs_write_reset,
+};
+
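+/*
+ * The "issue_logo" attribute takes the destination as a single decimal
+ * integer encoding the 24-bit FC port id: domain in bits 23..16, area in
+ * bits 15..8, AL_PA in bits 7..0. For example, writing 65793 (0x010101)
+ * sends an explicit LOGO to port id 01:01:01.
+ */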
+static ssize_t
+qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
+			struct bin_attribute *bin_attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	int type;
+	port_id_t did;
+
+	type = simple_strtol(buf, NULL, 10);
+
+	did.b.domain = (type & 0x00ff0000) >> 16;
+	did.b.area = (type & 0x0000ff00) >> 8;
+	did.b.al_pa = (type & 0x000000ff);
+
+	ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
+	    did.b.domain, did.b.area, did.b.al_pa);
+
+	ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
+
+	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+	return count;
+}
+
+static struct bin_attribute sysfs_issue_logo_attr = {
+	.attr = {
+		.name = "issue_logo",
+		.mode = S_IWUSR,
+	},
+	.size = 0,
+	.write = qla2x00_issue_logo,
+};
+
+static ssize_t
+qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
+		       struct bin_attribute *bin_attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+	uint16_t actual_size;
+
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
+		return 0;
+
+	if (ha->xgmac_data)
+		goto do_read;
+
+	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+	    &ha->xgmac_data_dma, GFP_KERNEL);
+	if (!ha->xgmac_data) {
+		ql_log(ql_log_warn, vha, 0x7076,
+		    "Unable to allocate memory for XGMAC read-data.\n");
+		return 0;
+	}
+
+do_read:
+	actual_size = 0;
+	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
+
+	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
+	    XGMAC_DATA_SIZE, &actual_size);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x7077,
+		    "Unable to read XGMAC data (%x).\n", rval);
+		count = 0;
+	}
+
+	count = actual_size > count ? count : actual_size;
+	memcpy(buf, ha->xgmac_data, count);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_xgmac_stats_attr = {
+	.attr = {
+		.name = "xgmac_stats",
+		.mode = S_IRUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_xgmac_stats,
+};
+
+static ssize_t
+qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
+		       struct bin_attribute *bin_attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+	    struct device, kobj)));
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+
+	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
+		return 0;
+
+	if (ha->dcbx_tlv)
+		goto do_read;
+
+	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+	    &ha->dcbx_tlv_dma, GFP_KERNEL);
+	if (!ha->dcbx_tlv) {
+		ql_log(ql_log_warn, vha, 0x7078,
+		    "Unable to allocate memory for DCBX TLV read-data.\n");
+		return -ENOMEM;
+	}
+
+do_read:
+	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
+
+	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
+	    DCBX_TLV_DATA_SIZE);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x7079,
+		    "Unable to read DCBX TLV (%x).\n", rval);
+		return -EIO;
+	}
+
+	memcpy(buf, ha->dcbx_tlv, count);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_dcbx_tlv_attr = {
+	.attr = {
+		.name = "dcbx_tlv",
+		.mode = S_IRUSR,
+	},
+	.size = 0,
+	.read = qla2x00_sysfs_read_dcbx_tlv,
+};
+
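+/*
+ * Despite its name, is4GBp_only is a capability selector checked in
+ * qla2x00_alloc_sysfs_attr()/qla2x00_free_sysfs_attr():
+ *	0 - always created, 1 - FWI2-capable (4Gb+) ISPs only,
+ *	2 - ISP25xx only, 3 - CNA-capable adapters only.
+ */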
+static struct sysfs_entry {
+	char *name;
+	struct bin_attribute *attr;
+	int is4GBp_only;
+} bin_file_entries[] = {
+	{ "fw_dump", &sysfs_fw_dump_attr, },
+	{ "nvram", &sysfs_nvram_attr, },
+	{ "optrom", &sysfs_optrom_attr, },
+	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
+	{ "vpd", &sysfs_vpd_attr, 1 },
+	{ "sfp", &sysfs_sfp_attr, 1 },
+	{ "reset", &sysfs_reset_attr, },
+	{ "issue_logo", &sysfs_issue_logo_attr, },
+	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
+	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
+	{ NULL },
+};
+
+void
+qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
+{
+	struct Scsi_Host *host = vha->host;
+	struct sysfs_entry *iter;
+	int ret;
+
+	for (iter = bin_file_entries; iter->name; iter++) {
+		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
+			continue;
+		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
+			continue;
+		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+			continue;
+
+		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+		    iter->attr);
+		if (ret)
+			ql_log(ql_log_warn, vha, 0x00f3,
+			    "Unable to create sysfs %s binary attribute (%d).\n",
+			    iter->name, ret);
+		else
+			ql_dbg(ql_dbg_init, vha, 0x00f4,
+			    "Successfully created sysfs %s binary attribute.\n",
+			    iter->name);
+	}
+}
+
+void
+qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
+{
+	struct Scsi_Host *host = vha->host;
+	struct sysfs_entry *iter;
+	struct qla_hw_data *ha = vha->hw;
+
+	for (iter = bin_file_entries; iter->name; iter++) {
+		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+			continue;
+		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
+			continue;
+		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+			continue;
+		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+			continue;
+
+		sysfs_remove_bin_file(&host->shost_gendev.kobj,
+		    iter->attr);
+	}
+
+	if (stop_beacon && ha->beacon_blink_led == 1)
+		ha->isp_ops->beacon_off(vha);
+}
+
+/* Scsi_Host attributes. */
+
+static ssize_t
+qla2x00_drvr_version_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
+}
+
+static ssize_t
+qla2x00_fw_version_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	char fw_str[128];
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
+}
+
+static ssize_t
+qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t sn;
+
+	if (IS_QLAFX00(vha->hw)) {
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->mr.serial_num);
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
+		return strlen(strcat(buf, "\n"));
+	}
+
+	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
+	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
+	    sn % 100000);
+}
+
+static ssize_t
+qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
+}
+
+static ssize_t
+qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLAFX00(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->mr.hw_version);
+
+	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
+	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
+	    ha->product_id[3]);
+}
+
+static ssize_t
+qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
+}
+
+static ssize_t
+qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
+}
+
+static ssize_t
+qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	char pci_info[30];
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
+}
+
+static ssize_t
+qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	int len = 0;
+
+	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
+	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
+	    vha->device_flags & DFLG_NO_CABLE)
+		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
+	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
+	    qla2x00_reset_active(vha))
+		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
+	else {
+		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
+
+		switch (ha->current_topology) {
+		case ISP_CFG_NL:
+			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
+			break;
+		case ISP_CFG_FL:
+			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
+			break;
+		case ISP_CFG_N:
+			len += scnprintf(buf + len, PAGE_SIZE-len,
+			    "N_Port to N_Port\n");
+			break;
+		case ISP_CFG_F:
+			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
+			break;
+		default:
+			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
+			break;
+		}
+	}
+	return len;
+}
+
+static ssize_t
+qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
+		 char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int len = 0;
+
+	switch (vha->hw->zio_mode) {
+	case QLA_ZIO_MODE_6:
+		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
+		break;
+	case QLA_ZIO_DISABLED:
+		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
+		break;
+	}
+	return len;
+}
+
+static ssize_t
+qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	int val = 0;
+	uint16_t zio_mode;
+
+	if (!IS_ZIO_SUPPORTED(ha))
+		return -ENOTSUPP;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	if (val)
+		zio_mode = QLA_ZIO_MODE_6;
+	else
+		zio_mode = QLA_ZIO_DISABLED;
+
+	/* Update per-hba values and queue a reset. */
+	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
+		ha->zio_mode = zio_mode;
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	}
+	return strlen(buf);
+}
+
+static ssize_t
+qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
+}
+
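+/*
+ * zio_timer is kept in 100-microsecond units; writes are interpreted as
+ * microseconds and must fall within 100..25500, so "echo 200" stores 2.
+ */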
+static ssize_t
+qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+	uint16_t zio_timer;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+	if (val > 25500 || val < 100)
+		return -ERANGE;
+
+	zio_timer = (uint16_t)(val / 100);
+	vha->hw->zio_timer = zio_timer;
+
+	return strlen(buf);
+}
+
+static ssize_t
+qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int len = 0;
+
+	if (vha->hw->beacon_blink_led)
+		len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
+	else
+		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
+	return len;
+}
+
+static ssize_t
+qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	int val = 0;
+	int rval;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return -EPERM;
+
+	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+		ql_log(ql_log_warn, vha, 0x707a,
+		    "Abort ISP active -- ignoring beacon request.\n");
+		return -EBUSY;
+	}
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	if (val)
+		rval = ha->isp_ops->beacon_on(vha);
+	else
+		rval = ha->isp_ops->beacon_off(vha);
+
+	if (rval != QLA_SUCCESS)
+		count = 0;
+
+	return count;
+}
+
+static ssize_t
+qla2x00_optrom_bios_version_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
+	    ha->bios_revision[0]);
+}
+
+static ssize_t
+qla2x00_optrom_efi_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
+	    ha->efi_revision[0]);
+}
+
+static ssize_t
+qla2x00_optrom_fcode_version_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
+	    ha->fcode_revision[0]);
+}
+
+static ssize_t
+qla2x00_optrom_fw_version_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
+	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
+	    ha->fw_revision[3]);
+}
+
+static ssize_t
+qla2x00_optrom_gold_fw_version_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
+	    ha->gold_fw_version[0], ha->gold_fw_version[1],
+	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
+}
+
+static ssize_t
+qla2x00_total_isp_aborts_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+	    vha->qla_stats.total_isp_aborts);
+}
+
+static ssize_t
+qla24xx_84xx_fw_version_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int rval = QLA_SUCCESS;
+	uint16_t status[2] = {0, 0};
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA84XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	if (ha->cs84xx->op_fw_version == 0)
+		rval = qla84xx_verify_chip(vha, status);
+
+	if ((rval == QLA_SUCCESS) && (status[0] == 0))
+		return scnprintf(buf, PAGE_SIZE, "%u\n",
+			(uint32_t)ha->cs84xx->op_fw_version);
+
+	return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
+qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
+	    !IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
+	    ha->mpi_capabilities);
+}
+
+static ssize_t
+qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
+}
+
+static ssize_t
+qla2x00_flash_block_size_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
+}
+
+static ssize_t
+qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_CNA_CAPABLE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
+}
+
+static ssize_t
+qla2x00_vn_port_mac_address_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_CNA_CAPABLE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
+}
+
+static ssize_t
+qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
+}
+
+static ssize_t
+qla2x00_thermal_temp_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	uint16_t temp = 0;
+
+	if (qla2x00_reset_active(vha)) {
+		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+		goto done;
+	}
+
+	if (vha->hw->flags.eeh_busy) {
+		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+		goto done;
+	}
+
+	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
+
+done:
+	return scnprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
+qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int rval = QLA_FUNCTION_FAILED;
+	uint16_t state[6];
+	uint32_t pstate;
+
+	if (IS_QLAFX00(vha->hw)) {
+		pstate = qlafx00_fw_state_show(dev, attr, buf);
+		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
+	}
+
+	if (qla2x00_reset_active(vha))
+		ql_log(ql_log_warn, vha, 0x707c,
+		    "ISP reset active.\n");
+	else if (!vha->hw->flags.eeh_busy)
+		rval = qla2x00_get_firmware_state(vha, state);
+	if (rval != QLA_SUCCESS)
+		memset(state, -1, sizeof(state));
+
+	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+	    state[0], state[1], state[2], state[3], state[4], state[5]);
+}
+
+static ssize_t
+qla2x00_diag_requests_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_BIDI_CAPABLE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
+}
+
+static ssize_t
+qla2x00_diag_megabytes_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_BIDI_CAPABLE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
+	    vha->bidi_stats.transfer_bytes >> 20);
+}
+
+static ssize_t
+qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t size;
+
+	if (!ha->fw_dumped)
+		size = 0;
+	else if (IS_P3P_TYPE(ha))
+		size = ha->md_template_size + ha->md_dump_size;
+	else
+		size = ha->fw_dump_len;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (!IS_P3P_TYPE(vha->hw))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+	else
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->allow_cna_fw_dump ? "true" : "false");
+}
+
+static ssize_t
+qla2x00_allow_cna_fw_dump_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int val = 0;
+
+	if (!IS_P3P_TYPE(vha->hw))
+		return -EINVAL;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	vha->hw->allow_cna_fw_dump = val != 0;
+
+	return strlen(buf);
+}
+
+static ssize_t
+qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
+}
+
+static ssize_t
+qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->min_link_speed == 5 ? "32Gbps" :
+	    ha->min_link_speed == 4 ? "16Gbps" :
+	    ha->min_link_speed == 3 ? "8Gbps" :
+	    ha->min_link_speed == 2 ? "4Gbps" :
+	    ha->min_link_speed != 0 ? "unknown" : "");
+}
+
+static ssize_t
+qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+	    ha->max_speed_sup ? "32Gbps" : "16Gbps");
+}
+
+static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
+static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
+static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
+static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
+static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
+static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
+static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
+static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
+static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
+static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
+		   qla2x00_zio_timer_store);
+static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
+		   qla2x00_beacon_store);
+static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
+		   qla2x00_optrom_bios_version_show, NULL);
+static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
+		   qla2x00_optrom_efi_version_show, NULL);
+static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
+		   qla2x00_optrom_fcode_version_show, NULL);
+static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
+		   NULL);
+static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
+    qla2x00_optrom_gold_fw_version_show, NULL);
+static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+		   NULL);
+static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
+		   NULL);
+static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
+static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
+static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
+		   NULL);
+static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
+static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
+		   qla2x00_vn_port_mac_address_show, NULL);
+static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
+static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
+static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
+static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
+static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
+static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
+		   qla2x00_allow_cna_fw_dump_show,
+		   qla2x00_allow_cna_fw_dump_store);
+static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
+static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
+static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+
+struct device_attribute *qla2x00_host_attrs[] = {
+	&dev_attr_driver_version,
+	&dev_attr_fw_version,
+	&dev_attr_serial_num,
+	&dev_attr_isp_name,
+	&dev_attr_isp_id,
+	&dev_attr_model_name,
+	&dev_attr_model_desc,
+	&dev_attr_pci_info,
+	&dev_attr_link_state,
+	&dev_attr_zio,
+	&dev_attr_zio_timer,
+	&dev_attr_beacon,
+	&dev_attr_optrom_bios_version,
+	&dev_attr_optrom_efi_version,
+	&dev_attr_optrom_fcode_version,
+	&dev_attr_optrom_fw_version,
+	&dev_attr_84xx_fw_version,
+	&dev_attr_total_isp_aborts,
+	&dev_attr_mpi_version,
+	&dev_attr_phy_version,
+	&dev_attr_flash_block_size,
+	&dev_attr_vlan_id,
+	&dev_attr_vn_port_mac_address,
+	&dev_attr_fabric_param,
+	&dev_attr_fw_state,
+	&dev_attr_optrom_gold_fw_version,
+	&dev_attr_thermal_temp,
+	&dev_attr_diag_requests,
+	&dev_attr_diag_megabytes,
+	&dev_attr_fw_dump_size,
+	&dev_attr_allow_cna_fw_dump,
+	&dev_attr_pep_version,
+	&dev_attr_min_link_speed,
+	&dev_attr_max_speed_sup,
+	NULL,
+};
+
+/* Host attributes. */
+
+static void
+qla2x00_get_host_port_id(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
+	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
+}
+
+static void
+qla2x00_get_host_speed(struct Scsi_Host *shost)
+{
+	struct qla_hw_data *ha = ((struct scsi_qla_host *)
+					(shost_priv(shost)))->hw;
+	u32 speed = FC_PORTSPEED_UNKNOWN;
+
+	if (IS_QLAFX00(ha)) {
+		qlafx00_get_host_speed(shost);
+		return;
+	}
+
+	switch (ha->link_data_rate) {
+	case PORT_SPEED_1GB:
+		speed = FC_PORTSPEED_1GBIT;
+		break;
+	case PORT_SPEED_2GB:
+		speed = FC_PORTSPEED_2GBIT;
+		break;
+	case PORT_SPEED_4GB:
+		speed = FC_PORTSPEED_4GBIT;
+		break;
+	case PORT_SPEED_8GB:
+		speed = FC_PORTSPEED_8GBIT;
+		break;
+	case PORT_SPEED_10GB:
+		speed = FC_PORTSPEED_10GBIT;
+		break;
+	case PORT_SPEED_16GB:
+		speed = FC_PORTSPEED_16GBIT;
+		break;
+	case PORT_SPEED_32GB:
+		speed = FC_PORTSPEED_32GBIT;
+		break;
+	}
+	fc_host_speed(shost) = speed;
+}
+
+static void
+qla2x00_get_host_port_type(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
+
+	if (vha->vp_idx) {
+		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+		return;
+	}
+	switch (vha->hw->current_topology) {
+	case ISP_CFG_NL:
+		port_type = FC_PORTTYPE_LPORT;
+		break;
+	case ISP_CFG_FL:
+		port_type = FC_PORTTYPE_NLPORT;
+		break;
+	case ISP_CFG_N:
+		port_type = FC_PORTTYPE_PTP;
+		break;
+	case ISP_CFG_F:
+		port_type = FC_PORTTYPE_NPORT;
+		break;
+	}
+	fc_host_port_type(shost) = port_type;
+}
+
+static void
+qla2x00_get_starget_node_name(struct scsi_target *starget)
+{
+	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport;
+	u64 node_name = 0;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->rport &&
+		    starget->id == fcport->rport->scsi_target_id) {
+			node_name = wwn_to_u64(fcport->node_name);
+			break;
+		}
+	}
+
+	fc_starget_node_name(starget) = node_name;
+}
+
+static void
+qla2x00_get_starget_port_name(struct scsi_target *starget)
+{
+	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport;
+	u64 port_name = 0;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->rport &&
+		    starget->id == fcport->rport->scsi_target_id) {
+			port_name = wwn_to_u64(fcport->port_name);
+			break;
+		}
+	}
+
+	fc_starget_port_name(starget) = port_name;
+}
+
+static void
+qla2x00_get_starget_port_id(struct scsi_target *starget)
+{
+	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport;
+	uint32_t port_id = ~0U;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->rport &&
+		    starget->id == fcport->rport->scsi_target_id) {
+			port_id = fcport->d_id.b.domain << 16 |
+			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+			break;
+		}
+	}
+
+	fc_starget_port_id(starget) = port_id;
+}
+
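+/* A dev_loss_tmo of zero is not allowed; clamp it to the 1-second minimum. */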
+static void
+qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+	if (timeout)
+		rport->dev_loss_tmo = timeout;
+	else
+		rport->dev_loss_tmo = 1;
+}
+
+static void
+qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+	struct Scsi_Host *host = rport_to_shost(rport);
+	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
+
+	if (!fcport)
+		return;
+
+	/*
+	 * Now that the rport has been deleted, set the fcport state to
+	 * FCS_DEVICE_DEAD.
+	 */
+	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+
+	/*
+	 * Transport has effectively 'deleted' the rport, clear
+	 * all local references.
+	 */
+	spin_lock_irqsave(host->host_lock, flags);
+	fcport->rport = fcport->drport = NULL;
+	*((fc_port_t **)rport->dd_data) = NULL;
+	spin_unlock_irqrestore(host->host_lock, flags);
+
+	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+		return;
+
+	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+		return;
+	}
+}
+
+static void
+qla2x00_terminate_rport_io(struct fc_rport *rport)
+{
+	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+	if (!fcport)
+		return;
+
+	if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
+		return;
+
+	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+		return;
+
+	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+		return;
+	}
+	/*
+	 * At this point all fcport's software-states are cleared.  Perform any
+	 * final cleanup of firmware resources (PCBs and XCBs).
+	 */
+	if (fcport->loop_id != FC_NO_LOOP_ID) {
+		if (IS_FWI2_CAPABLE(fcport->vha->hw))
+			fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+			    fcport->loop_id, fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+		else
+			qla2x00_port_logout(fcport->vha, fcport);
+	}
+}
+
+static int
+qla2x00_issue_lip(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	if (IS_QLAFX00(vha->hw))
+		return 0;
+
+	qla2x00_loop_reset(vha);
+	return 0;
+}
+
+static struct fc_host_statistics *
+qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	int rval;
+	struct link_statistics *stats;
+	dma_addr_t stats_dma;
+	struct fc_host_statistics *p = &vha->fc_host_stat;
+
+	memset(p, -1, sizeof(*p));
+
+	if (IS_QLAFX00(vha->hw))
+		goto done;
+
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		goto done;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto done;
+
+	if (qla2x00_reset_active(vha))
+		goto done;
+
+	stats = dma_alloc_coherent(&ha->pdev->dev,
+	    sizeof(*stats), &stats_dma, GFP_KERNEL);
+	if (!stats) {
+		ql_log(ql_log_warn, vha, 0x707d,
+		    "Failed to allocate memory for stats.\n");
+		goto done;
+	}
+	memset(stats, 0, sizeof(*stats));
+
+	rval = QLA_FUNCTION_FAILED;
+	if (IS_FWI2_CAPABLE(ha)) {
+		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
+	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
+	    !ha->dpc_active) {
+		/* Must be in a 'READY' state for statistics retrieval. */
+		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
+						stats, stats_dma);
+	}
+
+	if (rval != QLA_SUCCESS)
+		goto done_free;
+
+	p->link_failure_count = stats->link_fail_cnt;
+	p->loss_of_sync_count = stats->loss_sync_cnt;
+	p->loss_of_signal_count = stats->loss_sig_cnt;
+	p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
+	p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
+	p->invalid_crc_count = stats->inval_crc_cnt;
+	if (IS_FWI2_CAPABLE(ha)) {
+		p->lip_count = stats->lip_cnt;
+		p->tx_frames = stats->tx_frames;
+		p->rx_frames = stats->rx_frames;
+		p->dumped_frames = stats->discarded_frames;
+		p->nos_count = stats->nos_rcvd;
+		p->error_frames =
+			stats->dropped_frames + stats->discarded_frames;
+		p->rx_words = vha->qla_stats.input_bytes;
+		p->tx_words = vha->qla_stats.output_bytes;
+	}
+	p->fcp_control_requests = vha->qla_stats.control_requests;
+	p->fcp_input_requests = vha->qla_stats.input_requests;
+	p->fcp_output_requests = vha->qla_stats.output_requests;
+	p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+	p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+	p->seconds_since_last_reset =
+		get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+	do_div(p->seconds_since_last_reset, HZ);
+
+done_free:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+	    stats, stats_dma);
+done:
+	return p;
+}
+
+static void
+qla2x00_reset_host_stats(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct link_statistics *stats;
+	dma_addr_t stats_dma;
+
+	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
+	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
+	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+
+	if (IS_FWI2_CAPABLE(ha)) {
+		stats = dma_alloc_coherent(&ha->pdev->dev,
+		    sizeof(*stats), &stats_dma, GFP_KERNEL);
+		if (!stats) {
+			ql_log(ql_log_warn, vha, 0x70d7,
+			    "Failed to allocate memory for stats.\n");
+			return;
+		}
+
+		/* reset firmware statistics */
+		qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+
+		dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
+		    stats, stats_dma);
+	}
+}
+
+static void
+qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
+	    sizeof(fc_host_symbolic_name(shost)));
+}
+
+static void
+qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+}
+
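+/*
+ * With no switch discovered, fall back to an all-FF worldwide name as a
+ * sentinel so user space can distinguish "no fabric" from a real one.
+ */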
+static void
+qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF };
+	u64 fabric_name = wwn_to_u64(node_name);
+
+	if (vha->device_flags & SWITCH_FOUND)
+		fabric_name = wwn_to_u64(vha->fabric_node_name);
+
+	fc_host_fabric_name(shost) = fabric_name;
+}
+
+static void
+qla2x00_get_host_port_state(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
+
+	if (!base_vha->flags.online) {
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+		return;
+	}
+
+	switch (atomic_read(&base_vha->loop_state)) {
+	case LOOP_UPDATE:
+		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+		break;
+	case LOOP_DOWN:
+		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+		else
+			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+		break;
+	case LOOP_DEAD:
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+		break;
+	case LOOP_READY:
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+		break;
+	default:
+		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+		break;
+	}
+}
+
+static int
+qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+	int	ret = 0;
+	uint8_t	qos = 0;
+	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+	scsi_qla_host_t *vha = NULL;
+	struct qla_hw_data *ha = base_vha->hw;
+	int	cnt;
+	struct req_que *req = ha->req_q_map[0];
+	struct qla_qpair *qpair;
+
+	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0x707e,
+		    "Vport sanity check failed, status %x\n", ret);
+		return ret;
+	}
+
+	vha = qla24xx_create_vhost(fc_vport);
+	if (vha == NULL) {
+		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
+		return FC_VPORT_FAILED;
+	}
+	if (disable) {
+		atomic_set(&vha->vp_state, VP_OFFLINE);
+		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+	} else
+		atomic_set(&vha->vp_state, VP_FAILED);
+
+	/* ready to create vport */
+	ql_log(ql_log_info, vha, 0x7080,
+	    "VP entry id %d assigned.\n", vha->vp_idx);
+
+	/* initialize vport states */
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->vp_err_state = VP_ERR_PORTDWN;
+	vha->vp_prev_err_state = VP_ERR_UNKWN;
+	/* Check if physical ha port is Up */
+	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
+	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+		/* Don't retry or attempt login of this virtual port */
+		ql_dbg(ql_dbg_user, vha, 0x7081,
+		    "Vport loop state is not UP.\n");
+		atomic_set(&vha->loop_state, LOOP_DEAD);
+		if (!disable)
+			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+	}
+
+	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+		if (ha->fw_attributes & BIT_4) {
+			int prot = 0, guard;
+			vha->flags.difdix_supported = 1;
+			ql_dbg(ql_dbg_user, vha, 0x7082,
+			    "Registered for DIF/DIX type 1 and 3 protection.\n");
+			if (ql2xenabledif == 1)
+				prot = SHOST_DIX_TYPE0_PROTECTION;
+			scsi_host_set_prot(vha->host,
+			    prot | SHOST_DIF_TYPE1_PROTECTION
+			    | SHOST_DIF_TYPE2_PROTECTION
+			    | SHOST_DIF_TYPE3_PROTECTION
+			    | SHOST_DIX_TYPE1_PROTECTION
+			    | SHOST_DIX_TYPE2_PROTECTION
+			    | SHOST_DIX_TYPE3_PROTECTION);
+
+			guard = SHOST_DIX_GUARD_CRC;
+
+			if (IS_PI_IPGUARD_CAPABLE(ha) &&
+			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+				guard |= SHOST_DIX_GUARD_IP;
+
+			scsi_host_set_guard(vha->host, guard);
+		} else
+			vha->flags.difdix_supported = 0;
+	}
+
+	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
+				   &ha->pdev->dev)) {
+		ql_dbg(ql_dbg_user, vha, 0x7083,
+		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
+		goto vport_create_failed_2;
+	}
+
+	/* initialize attributes */
+	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
+	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+	fc_host_supported_classes(vha->host) =
+		fc_host_supported_classes(base_vha->host);
+	fc_host_supported_speeds(vha->host) =
+		fc_host_supported_speeds(base_vha->host);
+
+	qlt_vport_create(vha, ha);
+	qla24xx_vport_disable(fc_vport, disable);
+
+	if (!ql2xmqsupport || !ha->npiv_info)
+		goto vport_queue;
+
+	/* Create a request queue in QoS mode for the vport */
+	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
+		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
+			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
+					8) == 0) {
+			qos = ha->npiv_info[cnt].q_qos;
+			break;
+		}
+	}
+
+	if (qos) {
+		qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
+		if (!qpair)
+			ql_log(ql_log_warn, vha, 0x7084,
+			    "Can't create qpair for VP[%d]\n",
+			    vha->vp_idx);
+		else {
+			ql_dbg(ql_dbg_multiq, vha, 0xc001,
+			    "Queue pair: %d Qos: %d) created for VP[%d]\n",
+			    qpair->id, qos, vha->vp_idx);
+			ql_dbg(ql_dbg_user, vha, 0x7085,
+			    "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+			    qpair->id, qos, vha->vp_idx);
+			req = qpair->req;
+			vha->qpair = qpair;
+		}
+	}
+
+vport_queue:
+	vha->req = req;
+	return 0;
+
+vport_create_failed_2:
+	qla24xx_disable_vp(vha);
+	qla24xx_deallocate_vp_id(vha);
+	scsi_host_put(vha->host);
+	return FC_VPORT_FAILED;
+}
+
+static int
+qla24xx_vport_delete(struct fc_vport *fc_vport)
+{
+	scsi_qla_host_t *vha = fc_vport->dd_data;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t id = vha->vp_idx;
+
+	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
+	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
+		msleep(1000);
+
+	qla24xx_disable_vp(vha);
+	qla2x00_wait_for_sess_deletion(vha);
+
+	vha->flags.delete_progress = 1;
+
+	qlt_remove_target(ha, vha);
+
+	fc_remove_host(vha->host);
+
+	scsi_remove_host(vha->host);
+
+	/* Allow the timer to run to drain queued items when removing the vp */
+	qla24xx_deallocate_vp_id(vha);
+
+	if (vha->timer_active) {
+		qla2x00_vp_stop_timer(vha);
+		ql_dbg(ql_dbg_user, vha, 0x7086,
+		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
+	}
+
+	qla2x00_free_fcports(vha);
+
+	mutex_lock(&ha->vport_lock);
+	ha->cur_vport_count--;
+	clear_bit(vha->vp_idx, ha->vp_idx_map);
+	mutex_unlock(&ha->vport_lock);
+
+	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
+	    vha->gnl.ldma);
+
+	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
+		if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0x7087,
+			    "Queue Pair delete failed.\n");
+	}
+
+	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+	scsi_host_put(vha->host);
+	return 0;
+}
+
+static int
+qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+	scsi_qla_host_t *vha = fc_vport->dd_data;
+
+	if (disable)
+		qla24xx_disable_vp(vha);
+	else
+		qla24xx_enable_vp(vha);
+
+	return 0;
+}
+
+struct fc_function_template qla2xxx_transport_functions = {
+
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_speeds = 1,
+
+	.get_host_port_id = qla2x00_get_host_port_id,
+	.show_host_port_id = 1,
+	.get_host_speed = qla2x00_get_host_speed,
+	.show_host_speed = 1,
+	.get_host_port_type = qla2x00_get_host_port_type,
+	.show_host_port_type = 1,
+	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
+	.show_host_symbolic_name = 1,
+	.set_host_system_hostname = qla2x00_set_host_system_hostname,
+	.show_host_system_hostname = 1,
+	.get_host_fabric_name = qla2x00_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+	.get_host_port_state = qla2x00_get_host_port_state,
+	.show_host_port_state = 1,
+
+	.dd_fcrport_size = sizeof(struct fc_port *),
+	.show_rport_supported_classes = 1,
+
+	.get_starget_node_name = qla2x00_get_starget_node_name,
+	.show_starget_node_name = 1,
+	.get_starget_port_name = qla2x00_get_starget_port_name,
+	.show_starget_port_name = 1,
+	.get_starget_port_id  = qla2x00_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.issue_fc_host_lip = qla2x00_issue_lip,
+	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+	.terminate_rport_io = qla2x00_terminate_rport_io,
+	.get_fc_host_stats = qla2x00_get_fc_host_stats,
+	.reset_fc_host_stats = qla2x00_reset_host_stats,
+
+	.vport_create = qla24xx_vport_create,
+	.vport_disable = qla24xx_vport_disable,
+	.vport_delete = qla24xx_vport_delete,
+	.bsg_request = qla24xx_bsg_request,
+	.bsg_timeout = qla24xx_bsg_timeout,
+};
+
+struct fc_function_template qla2xxx_transport_vport_functions = {
+
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+
+	.get_host_port_id = qla2x00_get_host_port_id,
+	.show_host_port_id = 1,
+	.get_host_speed = qla2x00_get_host_speed,
+	.show_host_speed = 1,
+	.get_host_port_type = qla2x00_get_host_port_type,
+	.show_host_port_type = 1,
+	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
+	.show_host_symbolic_name = 1,
+	.set_host_system_hostname = qla2x00_set_host_system_hostname,
+	.show_host_system_hostname = 1,
+	.get_host_fabric_name = qla2x00_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+	.get_host_port_state = qla2x00_get_host_port_state,
+	.show_host_port_state = 1,
+
+	.dd_fcrport_size = sizeof(struct fc_port *),
+	.show_rport_supported_classes = 1,
+
+	.get_starget_node_name = qla2x00_get_starget_node_name,
+	.show_starget_node_name = 1,
+	.get_starget_port_name = qla2x00_get_starget_port_name,
+	.show_starget_port_name = 1,
+	.get_starget_port_id  = qla2x00_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.issue_fc_host_lip = qla2x00_issue_lip,
+	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
+	.terminate_rport_io = qla2x00_terminate_rport_io,
+	.get_fc_host_stats = qla2x00_get_fc_host_stats,
+	.reset_fc_host_stats = qla2x00_reset_host_stats,
+
+	.bsg_request = qla24xx_bsg_request,
+	.bsg_timeout = qla24xx_bsg_timeout,
+};
+
+void
+qla2x00_init_host_attr(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	u32 speed = FC_PORTSPEED_UNKNOWN;
+
+	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
+	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+	fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
+			(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
+	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
+	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
+
+	if (IS_CNA_CAPABLE(ha))
+		speed = FC_PORTSPEED_10GBIT;
+	else if (IS_QLA2031(ha))
+		speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+		    FC_PORTSPEED_4GBIT;
+	else if (IS_QLA25XX(ha))
+		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLA24XX_TYPE(ha))
+		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
+		    FC_PORTSPEED_1GBIT;
+	else if (IS_QLA23XX(ha))
+		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLAFX00(ha))
+		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLA27XX(ha))
+		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+		    FC_PORTSPEED_8GBIT;
+	else
+		speed = FC_PORTSPEED_1GBIT;
+	fc_host_supported_speeds(vha->host) = speed;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_bsg.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_bsg.c
new file mode 100644
index 0000000..c1ca21a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_bsg.c
@@ -0,0 +1,2584 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/bsg-lib.h>
+
+/* BSG support for ELS/CT pass through */
+void
+qla2x00_bsg_job_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+	bsg_reply->result = res;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	sp->free(sp);
+}
+
+void
+qla2x00_bsg_sp_free(void *ptr)
+{
+	srb_t *sp = ptr;
+	struct qla_hw_data *ha = sp->vha->hw;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+
+	if (sp->type == SRB_FXIOCB_BCMD) {
+		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+			dma_unmap_sg(&ha->pdev->dev,
+			    bsg_job->request_payload.sg_list,
+			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+			dma_unmap_sg(&ha->pdev->dev,
+			    bsg_job->reply_payload.sg_list,
+			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	}
+
+	if (sp->type == SRB_CT_CMD ||
+	    sp->type == SRB_FXIOCB_BCMD ||
+	    sp->type == SRB_ELS_CMD_HST)
+		kfree(sp->fcport);
+	qla2x00_rel_sp(sp);
+}
+
+int
+qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
+	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+{
+	int i, ret, num_valid;
+	uint8_t *bcode;
+	struct qla_fcp_prio_entry *pri_entry;
+	uint32_t *bcode_val_ptr, bcode_val;
+
+	ret = 1;
+	num_valid = 0;
+	bcode = (uint8_t *)pri_cfg;
+	bcode_val_ptr = (uint32_t *)pri_cfg;
+	bcode_val = (uint32_t)(*bcode_val_ptr);
+
+	if (bcode_val == 0xFFFFFFFF) {
+		/* No FCP Priority config data in flash */
+		ql_dbg(ql_dbg_user, vha, 0x7051,
+		    "No FCP Priority config data.\n");
+		return 0;
+	}
+
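+	/* A valid FCP priority region begins with the "HQOS" signature. */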
+	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
+			bcode[3] != 'S') {
+		/* Invalid FCP priority data header */
+		ql_dbg(ql_dbg_user, vha, 0x7052,
+		    "Invalid FCP Priority data header. bcode=0x%x.\n",
+		    bcode_val);
+		return 0;
+	}
+	if (flag != 1)
+		return ret;
+
+	pri_entry = &pri_cfg->entry[0];
+	for (i = 0; i < pri_cfg->num_entries; i++) {
+		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+			num_valid++;
+		pri_entry++;
+	}
+
+	if (num_valid == 0) {
+		/* No valid FCP priority data entries */
+		ql_dbg(ql_dbg_user, vha, 0x7053,
+		    "No valid FCP Priority data entries.\n");
+		ret = 0;
+	} else {
+		/* FCP priority data is valid */
+		ql_dbg(ql_dbg_user, vha, 0x7054,
+		    "Valid FCP priority data. num entries = %d.\n",
+		    num_valid);
+	}
+
+	return ret;
+}
+
+static int
+qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int ret = 0;
+	uint32_t len;
+	uint32_t oper;
+
+	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
+		ret = -EINVAL;
+		goto exit_fcp_prio_cfg;
+	}
+
+	/* Get the sub command */
+	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+	/* Only set config is allowed if config memory is not allocated */
+	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
+		ret = -EINVAL;
+		goto exit_fcp_prio_cfg;
+	}
+	switch (oper) {
+	case QLFC_FCP_PRIO_DISABLE:
+		if (ha->flags.fcp_prio_enabled) {
+			ha->flags.fcp_prio_enabled = 0;
+			ha->fcp_prio_cfg->attributes &=
+				~FCP_PRIO_ATTR_ENABLE;
+			qla24xx_update_all_fcp_prio(vha);
+			bsg_reply->result = DID_OK;
+		} else {
+			ret = -EINVAL;
+			bsg_reply->result = (DID_ERROR << 16);
+			goto exit_fcp_prio_cfg;
+		}
+		break;
+
+	case QLFC_FCP_PRIO_ENABLE:
+		if (!ha->flags.fcp_prio_enabled) {
+			if (ha->fcp_prio_cfg) {
+				ha->flags.fcp_prio_enabled = 1;
+				ha->fcp_prio_cfg->attributes |=
+				    FCP_PRIO_ATTR_ENABLE;
+				qla24xx_update_all_fcp_prio(vha);
+				bsg_reply->result = DID_OK;
+			} else {
+				ret = -EINVAL;
+				bsg_reply->result = (DID_ERROR << 16);
+				goto exit_fcp_prio_cfg;
+			}
+		}
+		break;
+
+	case QLFC_FCP_PRIO_GET_CONFIG:
+		len = bsg_job->reply_payload.payload_len;
+		if (!len || len > FCP_PRIO_CFG_SIZE) {
+			ret = -EINVAL;
+			bsg_reply->result = (DID_ERROR << 16);
+			goto exit_fcp_prio_cfg;
+		}
+
+		bsg_reply->result = DID_OK;
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(
+			bsg_job->reply_payload.sg_list,
+			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
+			len);
+
+		break;
+
+	case QLFC_FCP_PRIO_SET_CONFIG:
+		len = bsg_job->request_payload.payload_len;
+		if (!len || len > FCP_PRIO_CFG_SIZE) {
+			bsg_reply->result = (DID_ERROR << 16);
+			ret = -EINVAL;
+			goto exit_fcp_prio_cfg;
+		}
+
+		if (!ha->fcp_prio_cfg) {
+			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+			if (!ha->fcp_prio_cfg) {
+				ql_log(ql_log_warn, vha, 0x7050,
+				    "Unable to allocate memory for fcp prio "
+				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
+				bsg_reply->result = (DID_ERROR << 16);
+				ret = -ENOMEM;
+				goto exit_fcp_prio_cfg;
+			}
+		}
+
+		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
+			FCP_PRIO_CFG_SIZE);
+
+		/* validate fcp priority data */
+
+		if (!qla24xx_fcp_prio_cfg_valid(vha,
+		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
+			bsg_reply->result = (DID_ERROR << 16);
+			ret = -EINVAL;
+			/* If the buffer is invalid, the cached
+			 * fcp_prio_cfg is of no use.
+			 */
+			vfree(ha->fcp_prio_cfg);
+			ha->fcp_prio_cfg = NULL;
+			goto exit_fcp_prio_cfg;
+		}
+
+		ha->flags.fcp_prio_enabled = 0;
+		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
+			ha->flags.fcp_prio_enabled = 1;
+		qla24xx_update_all_fcp_prio(vha);
+		bsg_reply->result = DID_OK;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+exit_fcp_prio_cfg:
+	if (!ret)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return ret;
+}
+
+static int
+qla2x00_process_els(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_rport *rport;
+	fc_port_t *fcport = NULL;
+	struct Scsi_Host *host;
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	srb_t *sp;
+	const char *type;
+	int req_sg_cnt, rsp_sg_cnt;
+	int rval =  (DID_ERROR << 16);
+	uint16_t nextlid = 0;
+
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+		rport = fc_bsg_to_rport(bsg_job);
+		fcport = *(fc_port_t **) rport->dd_data;
+		host = rport_to_shost(rport);
+		vha = shost_priv(host);
+		ha = vha->hw;
+		type = "FC_BSG_RPT_ELS";
+	} else {
+		host = fc_bsg_to_shost(bsg_job);
+		vha = shost_priv(host);
+		ha = vha->hw;
+		type = "FC_BSG_HST_ELS_NOLOGIN";
+	}
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
+		rval = -EIO;
+		goto done;
+	}
+
+	/* pass through is supported only for ISP 4Gb or higher */
+	if (!IS_FWI2_CAPABLE(ha)) {
+		ql_dbg(ql_dbg_user, vha, 0x7001,
+		    "ELS passthru not supported for ISP23xx based adapters.\n");
+		rval = -EPERM;
+		goto done;
+	}
+
+	/* Multiple SGs are not supported for ELS requests */
+	if (bsg_job->request_payload.sg_cnt > 1 ||
+		bsg_job->reply_payload.sg_cnt > 1) {
+		ql_dbg(ql_dbg_user, vha, 0x7002,
+		    "Multiple SG's are not supported for ELS requests, "
+		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
+		    bsg_job->request_payload.sg_cnt,
+		    bsg_job->reply_payload.sg_cnt);
+		rval = -EPERM;
+		goto done;
+	}
+
+	/* ELS request for rport */
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+		/* make sure the rport is logged in,
+		 * if not perform fabric login
+		 */
+		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+			ql_dbg(ql_dbg_user, vha, 0x7003,
+			    "Failed to login port %06X for ELS passthru.\n",
+			    fcport->d_id.b24);
+			rval = -EIO;
+			goto done;
+		}
+	} else {
+		/* Allocate a dummy fcport structure, since the functions
+		 * preparing the IOCB and mailbox command retrieve port
+		 * specific information from the fcport structure. For host
+		 * based ELS commands there is no fcport structure allocated.
+		 */
+		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (!fcport) {
+			rval = -ENOMEM;
+			goto done;
+		}
+
+		/* Initialize all required fields of fcport */
+		fcport->vha = vha;
+		fcport->d_id.b.al_pa =
+			bsg_request->rqst_data.h_els.port_id[0];
+		fcport->d_id.b.area =
+			bsg_request->rqst_data.h_els.port_id[1];
+		fcport->d_id.b.domain =
+			bsg_request->rqst_data.h_els.port_id[2];
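+		/* An AL_PA of 0xFD addresses the fabric controller
+		 * well-known port; anything else is sent via the F-port
+		 * handle. */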
+		fcport->loop_id =
+			(fcport->d_id.b.al_pa == 0xFD) ?
+			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+	}
+
+	req_sg_cnt =
+		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	if (!req_sg_cnt) {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	if (!rsp_sg_cnt) {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+		ql_log(ql_log_warn, vha, 0x7008,
+		    "dma mapping resulted in different sg counts, "
+		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
+		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
+		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+		rval = -EAGAIN;
+		goto done_unmap_sg;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		rval = -ENOMEM;
+		goto done_unmap_sg;
+	}
+
+	sp->type =
+		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
+		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+	sp->name =
+		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
+		 "bsg_els_rpt" : "bsg_els_hst");
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
+
+	ql_dbg(ql_dbg_user, vha, 0x700a,
+	    "bsg rqst type: %s els type: %x - loop-id=%x "
+	    "portid=%-2x%02x%02x.\n", type,
+	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x700e,
+		    "qla2x00_start_sp failed = %d\n", rval);
+		qla2x00_rel_sp(sp);
+		rval = -EIO;
+		goto done_unmap_sg;
+	}
+	return rval;
+
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	goto done_free_fcport;
+
+done_free_fcport:
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+		kfree(fcport);
+done:
+	return rval;
+}
+
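+/*
+ * A CT command IOCB carries two data segment descriptors; each
+ * continuation IOCB carries up to five more, hence the arithmetic below.
+ */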
+static inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 2) {
+		iocbs += (dsds - 2) / 5;
+		if ((dsds - 2) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
+
+static int
+qla2x00_process_ct(struct bsg_job *bsg_job)
+{
+	srb_t *sp;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = (DID_ERROR << 16);
+	int req_sg_cnt, rsp_sg_cnt;
+	uint16_t loop_id;
+	struct fc_port *fcport;
+	char  *type = "FC_BSG_HST_CT";
+
+	req_sg_cnt =
+		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	if (!req_sg_cnt) {
+		ql_log(ql_log_warn, vha, 0x700f,
+		    "dma_map_sg return %d for request\n", req_sg_cnt);
+		rval = -ENOMEM;
+		goto done;
+	}
+
+	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	if (!rsp_sg_cnt) {
+		ql_log(ql_log_warn, vha, 0x7010,
+		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+		rval = -ENOMEM;
+		goto done;
+	}
+
+	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+		ql_log(ql_log_warn, vha, 0x7011,
+		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+		rval = -EAGAIN;
+		goto done_unmap_sg;
+	}
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x7012,
+		    "Host is not online.\n");
+		rval = -EIO;
+		goto done_unmap_sg;
+	}
+
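+	/* The top byte of CT preamble word 1 selects the destination:
+	 * 0xFC is the directory/name server and 0xFA the management
+	 * server well-known address. */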
+	loop_id =
+		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+			>> 24;
+	switch (loop_id) {
+	case 0xFC:
+		loop_id = cpu_to_le16(NPH_SNS);
+		break;
+	case 0xFA:
+		loop_id = vha->mgmt_svr_loop_id;
+		break;
+	default:
+		ql_dbg(ql_dbg_user, vha, 0x7013,
+		    "Unknown loop id: %x.\n", loop_id);
+		rval = -EINVAL;
+		goto done_unmap_sg;
+	}
+
+	/* Allocate a dummy fcport structure, since the functions preparing
+	 * the IOCB and mailbox command retrieve port specific information
+	 * from the fcport structure. For host based ELS commands there is
+	 * no fcport structure allocated.
+	 */
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_log(ql_log_warn, vha, 0x7014,
+		    "Failed to allocate fcport.\n");
+		rval = -ENOMEM;
+		goto done_unmap_sg;
+	}
+
+	/* Initialize all required fields of fcport */
+	fcport->vha = vha;
+	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
+	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
+	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
+	fcport->loop_id = loop_id;
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_warn, vha, 0x7015,
+		    "qla2x00_get_sp failed.\n");
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	sp->type = SRB_CT_CMD;
+	sp->name = "bsg_ct";
+	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
+
+	ql_dbg(ql_dbg_user, vha, 0x7016,
+	    "bsg rqst type: %s else type: %x - "
+	    "loop-id=%x portid=%02x%02x%02x.\n", type,
+	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
+	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x7017,
+		    "qla2x00_start_sp failed=%d.\n", rval);
+		qla2x00_rel_sp(sp);
+		rval = -EIO;
+		goto done_free_fcport;
+	}
+	return rval;
+
+done_free_fcport:
+	kfree(fcport);
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+	return rval;
+}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+			    int wait, int wait2)
+{
+	int ret = 0;
+	int rval = 0;
+	uint16_t new_config[4];
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+		goto done_reset_internal;
+
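+	/* If a loopback mode is currently enabled in port-config word 0,
+	 * clear the loopback field and push the restored configuration
+	 * back to the firmware. */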
+	memset(new_config, 0, sizeof(new_config));
+	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+	    ENABLE_INTERNAL_LOOPBACK ||
+	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+	    ENABLE_EXTERNAL_LOOPBACK) {
+		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
+		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
+
+		ha->notify_dcbx_comp = wait;
+		ha->notify_lb_portup_comp = wait2;
+
+		ret = qla81xx_set_port_config(vha, new_config);
+		if (ret != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x7025,
+			    "Set port config failed.\n");
+			ha->notify_dcbx_comp = 0;
+			ha->notify_lb_portup_comp = 0;
+			rval = -EINVAL;
+			goto done_reset_internal;
+		}
+
+		/* Wait for DCBX complete event */
+		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+			(DCBX_COMP_TIMEOUT * HZ))) {
+			ql_dbg(ql_dbg_user, vha, 0x7026,
+			    "DCBX completion not received.\n");
+			ha->notify_dcbx_comp = 0;
+			ha->notify_lb_portup_comp = 0;
+			rval = -EINVAL;
+			goto done_reset_internal;
+		} else
+			ql_dbg(ql_dbg_user, vha, 0x7027,
+			    "DCBX completion received.\n");
+
+		if (wait2 &&
+		    !wait_for_completion_timeout(&ha->lb_portup_comp,
+		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+			ql_dbg(ql_dbg_user, vha, 0x70c5,
+			    "Port up completion not received.\n");
+			ha->notify_lb_portup_comp = 0;
+			rval = -EINVAL;
+			goto done_reset_internal;
+		} else
+			ql_dbg(ql_dbg_user, vha, 0x70c6,
+			    "Port up completion received.\n");
+
+		ha->notify_dcbx_comp = 0;
+		ha->notify_lb_portup_comp = 0;
+	}
+done_reset_internal:
+	return rval;
+}
+
+/*
+ * Set the port configuration to enable the internal or external loopback
+ * depending on the loopback mode.
+ */
+static inline int
+qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+	uint16_t *new_config, uint16_t mode)
+{
+	int ret = 0;
+	int rval = 0;
+	unsigned long rem_tmo = 0, current_tmo = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+		goto done_set_internal;
+
+	if (mode == INTERNAL_LOOPBACK)
+		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
+	else if (mode == EXTERNAL_LOOPBACK)
+		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
+	ql_dbg(ql_dbg_user, vha, 0x70be,
+	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
+
+	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
+
+	ha->notify_dcbx_comp = 1;
+	ret = qla81xx_set_port_config(vha, new_config);
+	if (ret != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x7021,
+		    "set port config failed.\n");
+		ha->notify_dcbx_comp = 0;
+		rval = -EINVAL;
+		goto done_set_internal;
+	}
+
+	/* Wait for DCBX complete event */
+	current_tmo = DCBX_COMP_TIMEOUT * HZ;
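+	/* The firmware may ask for more time via idc_extend_tmo; re-arm
+	 * the wait with the extended timeout until the completion arrives
+	 * or a timeout expires with no extension pending. */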
+	while (1) {
+		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
+		    current_tmo);
+		if (!ha->idc_extend_tmo || rem_tmo) {
+			ha->idc_extend_tmo = 0;
+			break;
+		}
+		current_tmo = ha->idc_extend_tmo * HZ;
+		ha->idc_extend_tmo = 0;
+	}
+
+	if (!rem_tmo) {
+		ql_dbg(ql_dbg_user, vha, 0x7022,
+		    "DCBX completion not received.\n");
+		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+		/*
+		 * If the reset of the loopback mode doesn't work take a FCoE
+		 * dump and reset the chip.
+		 */
+		if (ret) {
+			ha->isp_ops->fw_dump(vha, 0);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		}
+		rval = -EINVAL;
+	} else {
+		if (ha->flags.idc_compl_status) {
+			ql_dbg(ql_dbg_user, vha, 0x70c3,
+			    "Bad status in IDC Completion AEN\n");
+			rval = -EINVAL;
+			ha->flags.idc_compl_status = 0;
+		} else
+			ql_dbg(ql_dbg_user, vha, 0x7023,
+			    "DCBX completion received.\n");
+	}
+
+	ha->notify_dcbx_comp = 0;
+	ha->idc_extend_tmo = 0;
+
+done_set_internal:
+	return rval;
+}
+
+static int
+qla2x00_process_loopback(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+	uint8_t command_sent;
+	char *type;
+	struct msg_echo_lb elreq;
+	uint16_t response[MAILBOX_REGISTER_COUNT];
+	uint16_t config[4], new_config[4];
+	uint8_t *fw_sts_ptr;
+	uint8_t *req_data = NULL;
+	dma_addr_t req_data_dma;
+	uint32_t req_data_len;
+	uint8_t *rsp_data = NULL;
+	dma_addr_t rsp_data_dma;
+	uint32_t rsp_data_len;
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
+		return -EIO;
+	}
+
+	memset(&elreq, 0, sizeof(elreq));
+
+	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
+		DMA_TO_DEVICE);
+
+	if (!elreq.req_sg_cnt) {
+		ql_log(ql_log_warn, vha, 0x701a,
+		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
+		return -ENOMEM;
+	}
+
+	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+		DMA_FROM_DEVICE);
+
+	if (!elreq.rsp_sg_cnt) {
+		ql_log(ql_log_warn, vha, 0x701b,
+		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
+		rval = -ENOMEM;
+		goto done_unmap_req_sg;
+	}
+
+	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
+		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+		ql_log(ql_log_warn, vha, 0x701c,
+		    "dma mapping resulted in different sg counts, "
+		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
+		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
+		rval = -EAGAIN;
+		goto done_unmap_sg;
+	}
+	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+		&req_data_dma, GFP_KERNEL);
+	if (!req_data) {
+		ql_log(ql_log_warn, vha, 0x701d,
+		    "dma alloc failed for req_data.\n");
+		rval = -ENOMEM;
+		goto done_unmap_sg;
+	}
+
+	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+		&rsp_data_dma, GFP_KERNEL);
+	if (!rsp_data) {
+		ql_log(ql_log_warn, vha, 0x7004,
+		    "dma alloc failed for rsp_data.\n");
+		rval = -ENOMEM;
+		goto done_free_dma_req;
+	}
+
+	/* Copy the request buffer in req_data now */
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+	elreq.send_dma = req_data_dma;
+	elreq.rcv_dma = rsp_data_dma;
+	elreq.transfer_size = req_data_len;
+
+	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+	elreq.iteration_count =
+	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];
+
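+	/*
+	 * On a fabric topology with the link ready (or when the request
+	 * looks like a full-sized ELS ECHO frame), an external loopback
+	 * request is serviced with the firmware ECHO command instead of
+	 * a loopback test.
+	 */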
+	if (atomic_read(&vha->loop_state) == LOOP_READY &&
+	    (ha->current_topology == ISP_CFG_F ||
+	    (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+	     req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+	    elreq.options == EXTERNAL_LOOPBACK) {
+		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+		ql_dbg(ql_dbg_user, vha, 0x701e,
+		    "BSG request type: %s.\n", type);
+		command_sent = INT_DEF_LB_ECHO_CMD;
+		rval = qla2x00_echo_test(vha, &elreq, response);
+	} else {
+		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
+			memset(config, 0, sizeof(config));
+			memset(new_config, 0, sizeof(new_config));
+
+			if (qla81xx_get_port_config(vha, config)) {
+				ql_log(ql_log_warn, vha, 0x701f,
+				    "Get port config failed.\n");
+				rval = -EPERM;
+				goto done_free_dma_rsp;
+			}
+
+			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+				ql_dbg(ql_dbg_user, vha, 0x70c4,
+				    "Loopback operation already in "
+				    "progress.\n");
+				rval = -EAGAIN;
+				goto done_free_dma_rsp;
+			}
+
+			ql_dbg(ql_dbg_user, vha, 0x70c0,
+			    "elreq.options=%04x\n", elreq.options);
+
+			if (elreq.options == EXTERNAL_LOOPBACK) {
+				if (IS_QLA8031(ha) || IS_QLA8044(ha))
+					rval = qla81xx_set_loopback_mode(vha,
+					    config, new_config, elreq.options);
+				else
+					rval = qla81xx_reset_loopback_mode(vha,
+					    config, 1, 0);
+			} else {
+				rval = qla81xx_set_loopback_mode(vha, config,
+				    new_config, elreq.options);
+			}
+
+			if (rval) {
+				rval = -EPERM;
+				goto done_free_dma_rsp;
+			}
+
+			type = "FC_BSG_HST_VENDOR_LOOPBACK";
+			ql_dbg(ql_dbg_user, vha, 0x7028,
+			    "BSG request type: %s.\n", type);
+
+			command_sent = INT_DEF_LB_LOOPBACK_CMD;
+			rval = qla2x00_loopback_test(vha, &elreq, response);
+
+			if (response[0] == MBS_COMMAND_ERROR &&
+					response[1] == MBS_LB_RESET) {
+				ql_log(ql_log_warn, vha, 0x7029,
+				    "MBX command error, Aborting ISP.\n");
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+				qla2x00_wait_for_chip_reset(vha);
+				/* Also reset the MPI */
+				if (IS_QLA81XX(ha)) {
+					if (qla81xx_restart_mpi_firmware(vha) !=
+					    QLA_SUCCESS) {
+						ql_log(ql_log_warn, vha, 0x702a,
+						    "MPI reset failed.\n");
+					}
+				}
+
+				rval = -EIO;
+				goto done_free_dma_rsp;
+			}
+
+			if (new_config[0]) {
+				int ret;
+
+				/* Revert back to original port config
+				 * Also clear internal loopback
+				 */
+				ret = qla81xx_reset_loopback_mode(vha,
+				    new_config, 0, 1);
+				if (ret) {
+					/*
+					 * If the reset of the loopback mode
+					 * doesn't work take FCoE dump and then
+					 * reset the chip.
+					 */
+					ha->isp_ops->fw_dump(vha, 0);
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+				}
+
+			}
+
+		} else {
+			type = "FC_BSG_HST_VENDOR_LOOPBACK";
+			ql_dbg(ql_dbg_user, vha, 0x702b,
+			    "BSG request type: %s.\n", type);
+			command_sent = INT_DEF_LB_LOOPBACK_CMD;
+			rval = qla2x00_loopback_test(vha, &elreq, response);
+		}
+	}
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x702c,
+		    "Vendor request %s failed.\n", type);
+
+		rval = 0;
+		bsg_reply->result = (DID_ERROR << 16);
+		bsg_reply->reply_payload_rcv_len = 0;
+	} else {
+		ql_dbg(ql_dbg_user, vha, 0x702d,
+		    "Vendor request %s completed.\n", type);
+		bsg_reply->result = (DID_OK << 16);
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+			bsg_job->reply_payload.sg_cnt, rsp_data,
+			rsp_data_len);
+	}
+
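+	/* Append the raw mailbox response registers, followed by the
+	 * command that was issued, to the bsg reply; they are carried in
+	 * the request's sense buffer. */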
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+	    sizeof(response) + sizeof(uint8_t);
+	fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
+	    sizeof(struct fc_bsg_reply);
+	memcpy(fw_sts_ptr, response, sizeof(response));
+	fw_sts_ptr += sizeof(response);
+	*fw_sts_ptr = command_sent;
+
+done_free_dma_rsp:
+	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
+		rsp_data, rsp_data_dma);
+done_free_dma_req:
+	dma_free_coherent(&ha->pdev->dev, req_data_len,
+		req_data, req_data_dma);
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	if (!rval)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rval;
+}
+
+static int
+qla84xx_reset(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+	uint32_t flag;
+
+	if (!IS_QLA84XX(ha)) {
+		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
+		return -EINVAL;
+	}
+
+	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x7030,
+		    "Vendor request 84xx reset failed.\n");
+		rval = (DID_ERROR << 16);
+
+	} else {
+		ql_dbg(ql_dbg_user, vha, 0x7031,
+		    "Vendor request 84xx reset completed.\n");
+		bsg_reply->result = DID_OK;
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+
+	return rval;
+}
+
+static int
+qla84xx_updatefw(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	struct verify_chip_entry_84xx *mn = NULL;
+	dma_addr_t mn_dma, fw_dma;
+	void *fw_buf = NULL;
+	int rval = 0;
+	uint32_t sg_cnt;
+	uint32_t data_len;
+	uint16_t options;
+	uint32_t flag;
+	uint32_t fw_ver;
+
+	if (!IS_QLA84XX(ha)) {
+		ql_dbg(ql_dbg_user, vha, 0x7032,
+		    "Not 84xx, exiting.\n");
+		return -EINVAL;
+	}
+
+	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	if (!sg_cnt) {
+		ql_log(ql_log_warn, vha, 0x7033,
+		    "dma_map_sg returned %d for request.\n", sg_cnt);
+		return -ENOMEM;
+	}
+
+	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+		ql_log(ql_log_warn, vha, 0x7034,
+		    "DMA mapping resulted in different sg counts, "
+		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+		    bsg_job->request_payload.sg_cnt, sg_cnt);
+		rval = -EAGAIN;
+		goto done_unmap_sg;
+	}
+
+	data_len = bsg_job->request_payload.payload_len;
+	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
+		&fw_dma, GFP_KERNEL);
+	if (!fw_buf) {
+		ql_log(ql_log_warn, vha, 0x7035,
+		    "DMA alloc failed for fw_buf.\n");
+		rval = -ENOMEM;
+		goto done_unmap_sg;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, fw_buf, data_len);
+
+	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+	if (!mn) {
+		ql_log(ql_log_warn, vha, 0x7036,
+		    "DMA alloc failed for fw buffer.\n");
+		rval = -ENOMEM;
+		goto done_free_fw_buf;
+	}
+
+	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
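+	/* The firmware version is taken from the third 32-bit word of
+	 * the supplied image. */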
+	fw_ver = le32_to_cpu(*((uint32_t *)fw_buf + 2));
+
+	memset(mn, 0, sizeof(struct access_chip_84xx));
+	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
+	mn->entry_count = 1;
+
+	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
+	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
+		options |= VCO_DIAG_FW;
+
+	mn->options = cpu_to_le16(options);
+	mn->fw_ver =  cpu_to_le32(fw_ver);
+	mn->fw_size =  cpu_to_le32(data_len);
+	mn->fw_seq_size =  cpu_to_le32(data_len);
+	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
+	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
+	mn->dseg_length = cpu_to_le32(data_len);
+	mn->data_seg_cnt = cpu_to_le16(1);
+
+	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x7037,
+		    "Vendor request 84xx updatefw failed.\n");
+
+		rval = (DID_ERROR << 16);
+	} else {
+		ql_dbg(ql_dbg_user, vha, 0x7038,
+		    "Vendor request 84xx updatefw completed.\n");
+
+		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+		bsg_reply->result = DID_OK;
+	}
+
+	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+done_free_fw_buf:
+	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
+
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+	if (!rval)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rval;
+}
+
+static int
+qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	struct access_chip_84xx *mn = NULL;
+	dma_addr_t mn_dma, mgmt_dma;
+	void *mgmt_b = NULL;
+	int rval = 0;
+	struct qla_bsg_a84_mgmt *ql84_mgmt;
+	uint32_t sg_cnt;
+	uint32_t data_len = 0;
+	uint32_t dma_direction = DMA_NONE;
+
+	if (!IS_QLA84XX(ha)) {
+		ql_log(ql_log_warn, vha, 0x703a,
+		    "Not 84xx, exiting.\n");
+		return -EINVAL;
+	}
+
+	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+	if (!mn) {
+		ql_log(ql_log_warn, vha, 0x703c,
+		    "DMA alloc failed for fw buffer.\n");
+		return -ENOMEM;
+	}
+
+	memset(mn, 0, sizeof(struct access_chip_84xx));
+	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+	mn->entry_count = 1;
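+	/* The 84xx management request follows the generic fc_bsg_request
+	 * header in the bsg request buffer. */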
+	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
+	switch (ql84_mgmt->mgmt.cmd) {
+	case QLA84_MGMT_READ_MEM:
+	case QLA84_MGMT_GET_INFO:
+		sg_cnt = dma_map_sg(&ha->pdev->dev,
+			bsg_job->reply_payload.sg_list,
+			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		if (!sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x703d,
+			    "dma_map_sg returned %d for reply.\n", sg_cnt);
+			rval = -ENOMEM;
+			goto exit_mgmt;
+		}
+
+		dma_direction = DMA_FROM_DEVICE;
+
+		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x703e,
+			    "DMA mapping resulted in different sg counts, "
+			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+			    bsg_job->reply_payload.sg_cnt, sg_cnt);
+			rval = -EAGAIN;
+			goto done_unmap_sg;
+		}
+
+		data_len = bsg_job->reply_payload.payload_len;
+
+		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+		    &mgmt_dma, GFP_KERNEL);
+		if (!mgmt_b) {
+			ql_log(ql_log_warn, vha, 0x703f,
+			    "DMA alloc failed for mgmt_b.\n");
+			rval = -ENOMEM;
+			goto done_unmap_sg;
+		}
+
+		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
+			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+			mn->parameter1 =
+				cpu_to_le32(
+				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+
+		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
+			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+			mn->parameter1 =
+				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
+
+			mn->parameter2 =
+				cpu_to_le32(
+				ql84_mgmt->mgmt.mgmtp.u.info.context);
+		}
+		break;
+
+	case QLA84_MGMT_WRITE_MEM:
+		sg_cnt = dma_map_sg(&ha->pdev->dev,
+			bsg_job->request_payload.sg_list,
+			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		if (!sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x7040,
+			    "dma_map_sg returned %d.\n", sg_cnt);
+			rval = -ENOMEM;
+			goto exit_mgmt;
+		}
+
+		dma_direction = DMA_TO_DEVICE;
+
+		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x7041,
+			    "DMA mapping resulted in different sg counts, "
+			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+			    bsg_job->request_payload.sg_cnt, sg_cnt);
+			rval = -EAGAIN;
+			goto done_unmap_sg;
+		}
+
+		data_len = bsg_job->request_payload.payload_len;
+		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
+			&mgmt_dma, GFP_KERNEL);
+		if (!mgmt_b) {
+			ql_log(ql_log_warn, vha, 0x7042,
+			    "DMA alloc failed for mgmt_b.\n");
+			rval = -ENOMEM;
+			goto done_unmap_sg;
+		}
+
+		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
+
+		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+		mn->parameter1 =
+			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
+		break;
+
+	case QLA84_MGMT_CHNG_CONFIG:
+		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+		mn->parameter1 =
+			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
+
+		mn->parameter2 =
+			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
+
+		mn->parameter3 =
+			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
+		break;
+
+	default:
+		rval = -EIO;
+		goto exit_mgmt;
+	}
+
+	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
+		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
+		mn->dseg_count = cpu_to_le16(1);
+		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
+	}
+
+	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x7043,
+		    "Vendor request 84xx mgmt failed.\n");
+
+		rval = (DID_ERROR << 16);
+
+	} else {
+		ql_dbg(ql_dbg_user, vha, 0x7044,
+		    "Vendor request 84xx mgmt completed.\n");
+
+		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+		bsg_reply->result = DID_OK;
+
+		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
+			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
+			bsg_reply->reply_payload_rcv_len =
+				bsg_job->reply_payload.payload_len;
+
+			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+				bsg_job->reply_payload.sg_cnt, mgmt_b,
+				data_len);
+		}
+	}
+
+done_unmap_sg:
+	if (mgmt_b)
+		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
+
+	if (dma_direction == DMA_TO_DEVICE)
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	else if (dma_direction == DMA_FROM_DEVICE)
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+exit_mgmt:
+	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+	if (!rval)
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rval;
+}
+
+static int
+qla24xx_iidma(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	int rval = 0;
+	struct qla_port_param *port_param = NULL;
+	fc_port_t *fcport = NULL;
+	int found = 0;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	uint8_t *rsp_ptr = NULL;
+
+	if (!IS_IIDMA_CAPABLE(vha->hw)) {
+		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
+		return -EINVAL;
+	}
+
+	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
+	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
+		ql_log(ql_log_warn, vha, 0x7048,
+		    "Invalid destination type.\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->port_type != FCT_TARGET)
+			continue;
+
+		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
+			fcport->port_name, sizeof(fcport->port_name)))
+			continue;
+
+		found = 1;
+		break;
+	}
+
+	if (!found) {
+		ql_log(ql_log_warn, vha, 0x7049,
+		    "Failed to find port.\n");
+		return -EINVAL;
+	}
+
+	if (atomic_read(&fcport->state) != FCS_ONLINE) {
+		ql_log(ql_log_warn, vha, 0x704a,
+		    "Port is not online.\n");
+		return -EINVAL;
+	}
+
+	if (fcport->flags & FCF_LOGIN_NEEDED) {
+		ql_log(ql_log_warn, vha, 0x704b,
+		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
+		return -EINVAL;
+	}
+
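+	/* A non-zero mode sets the iIDMA speed; mode zero reads the
+	 * current speed back into port_param->speed. */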
+	if (port_param->mode)
+		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
+			port_param->speed, mb);
+	else
+		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
+			&port_param->speed, mb);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x704c,
+		    "iIDMA cmd failed for %8phN -- "
+		    "%04x %x %04x %04x.\n", fcport->port_name,
+		    rval, fcport->fp_speed, mb[0], mb[1]);
+		rval = (DID_ERROR << 16);
+	} else {
+		if (!port_param->mode) {
+			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+				sizeof(struct qla_port_param);
+
+			rsp_ptr = ((uint8_t *)bsg_reply) +
+				sizeof(struct fc_bsg_reply);
+
+			memcpy(rsp_ptr, port_param,
+				sizeof(struct qla_port_param));
+		}
+
+		bsg_reply->result = DID_OK;
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+
+	return rval;
+}
+
+static int
+qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
+	uint8_t is_update)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	uint32_t start = 0;
+	int valid = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return -EINVAL;
+
+	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+	if (start > ha->optrom_size) {
+		ql_log(ql_log_warn, vha, 0x7055,
+		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
+		return -EINVAL;
+	}
+
+	if (ha->optrom_state != QLA_SWAITING) {
+		ql_log(ql_log_info, vha, 0x7056,
+		    "optrom_state %d.\n", ha->optrom_state);
+		return -EBUSY;
+	}
+
+	ha->optrom_region_start = start;
+	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
+	if (is_update) {
+		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+			valid = 1;
+		else if (start == (ha->flt_region_boot * 4) ||
+		    start == (ha->flt_region_fw * 4))
+			valid = 1;
+		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
+			valid = 1;
+		if (!valid) {
+			ql_log(ql_log_warn, vha, 0x7058,
+			    "Invalid start region 0x%x/0x%x.\n", start,
+			    bsg_job->request_payload.payload_len);
+			return -EINVAL;
+		}
+
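+		/* Clamp the transfer so it cannot run past the end of
+		 * the option ROM. */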
+		ha->optrom_region_size = start +
+		    bsg_job->request_payload.payload_len > ha->optrom_size ?
+		    ha->optrom_size - start :
+		    bsg_job->request_payload.payload_len;
+		ha->optrom_state = QLA_SWRITING;
+	} else {
+		ha->optrom_region_size = start +
+		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
+		    ha->optrom_size - start :
+		    bsg_job->reply_payload.payload_len;
+		ha->optrom_state = QLA_SREADING;
+	}
+
+	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+	if (!ha->optrom_buffer) {
+		ql_log(ql_log_warn, vha, 0x7059,
+		    "Read: Unable to allocate memory for optrom retrieval "
+		    "(%x)\n", ha->optrom_region_size);
+
+		ha->optrom_state = QLA_SWAITING;
+		return -ENOMEM;
+	}
+
+	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+	return 0;
+}
+
+static int
+qla2x00_read_optrom(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+
+	if (ha->flags.nic_core_reset_hdlr_active)
+		return -EBUSY;
+
+	mutex_lock(&ha->optrom_mutex);
+	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
+	if (rval) {
+		mutex_unlock(&ha->optrom_mutex);
+		return rval;
+	}
+
+	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+	    ha->optrom_region_start, ha->optrom_region_size);
+
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
+	    ha->optrom_region_size);
+
+	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
+	bsg_reply->result = DID_OK;
+	vfree(ha->optrom_buffer);
+	ha->optrom_buffer = NULL;
+	ha->optrom_state = QLA_SWAITING;
+	mutex_unlock(&ha->optrom_mutex);
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return rval;
+}
+
+static int
+qla2x00_update_optrom(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+
+	mutex_lock(&ha->optrom_mutex);
+	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
+	if (rval) {
+		mutex_unlock(&ha->optrom_mutex);
+		return rval;
+	}
+
+	/* Set isp82xx_no_md_cap so no minidump is captured during the update */
+	ha->flags.isp82xx_no_md_cap = 1;
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
+	    ha->optrom_region_size);
+
+	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+	    ha->optrom_region_start, ha->optrom_region_size);
+
+	bsg_reply->result = DID_OK;
+	vfree(ha->optrom_buffer);
+	ha->optrom_buffer = NULL;
+	ha->optrom_state = QLA_SWAITING;
+	mutex_unlock(&ha->optrom_mutex);
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return rval;
+}
+
+static int
+qla2x00_update_fru_versions(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+	uint8_t bsg[DMA_POOL_SIZE];
+	struct qla_image_version_list *list = (void *)bsg;
+	struct qla_image_version *image;
+	uint32_t count;
+	dma_addr_t sfp_dma;
+	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+	if (!sfp) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_NO_MEMORY;
+		goto done;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
+
+	image = list->version;
+	count = list->count;
+	while (count--) {
+		memcpy(sfp, &image->field_info, sizeof(image->field_info));
+		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+		    image->field_address.device, image->field_address.offset,
+		    sizeof(image->field_info), image->field_address.option);
+		if (rval) {
+			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+			    EXT_STATUS_MAILBOX;
+			goto dealloc;
+		}
+		image++;
+	}
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
+static int
+qla2x00_read_fru_status(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+	uint8_t bsg[DMA_POOL_SIZE];
+	struct qla_status_reg *sr = (void *)bsg;
+	dma_addr_t sfp_dma;
+	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+	if (!sfp) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_NO_MEMORY;
+		goto done;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
+
+	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+	    sr->field_address.device, sr->field_address.offset,
+	    sizeof(sr->status_reg), sr->field_address.option);
+	sr->status_reg = *sfp;
+
+	if (rval) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_MAILBOX;
+		goto dealloc;
+	}
+
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
+static int
+qla2x00_write_fru_status(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+	uint8_t bsg[DMA_POOL_SIZE];
+	struct qla_status_reg *sr = (void *)bsg;
+	dma_addr_t sfp_dma;
+	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+	if (!sfp) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_NO_MEMORY;
+		goto done;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
+
+	*sfp = sr->status_reg;
+	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+	    sr->field_address.device, sr->field_address.offset,
+	    sizeof(sr->status_reg), sr->field_address.option);
+
+	if (rval) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_MAILBOX;
+		goto dealloc;
+	}
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
+static int
+qla2x00_write_i2c(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+	uint8_t bsg[DMA_POOL_SIZE];
+	struct qla_i2c_access *i2c = (void *)bsg;
+	dma_addr_t sfp_dma;
+	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+	if (!sfp) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_NO_MEMORY;
+		goto done;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+	memcpy(sfp, i2c->buffer, i2c->length);
+	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
+	    i2c->device, i2c->offset, i2c->length, i2c->option);
+
+	if (rval) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_MAILBOX;
+		goto dealloc;
+	}
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
+static int
+qla2x00_read_i2c(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+	uint8_t bsg[DMA_POOL_SIZE];
+	struct qla_i2c_access *i2c = (void *)bsg;
+	dma_addr_t sfp_dma;
+	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
+	if (!sfp) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_NO_MEMORY;
+		goto done;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
+
+	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
+		i2c->device, i2c->offset, i2c->length, i2c->option);
+
+	if (rval) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_MAILBOX;
+		goto dealloc;
+	}
+
+	memcpy(i2c->buffer, sfp, i2c->length);
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+
+dealloc:
+	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
+
+done:
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	return 0;
+}
+
+static int
+qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t rval = EXT_STATUS_OK;
+	uint16_t req_sg_cnt = 0;
+	uint16_t rsp_sg_cnt = 0;
+	uint16_t nextlid = 0;
+	uint32_t tot_dsds;
+	srb_t *sp = NULL;
+	uint32_t req_data_len;
+	uint32_t rsp_data_len;
+
+	/* Check the type of the adapter */
+	if (!IS_BIDI_CAPABLE(ha)) {
+		ql_log(ql_log_warn, vha, 0x70a0,
+			"This adapter is not supported\n");
+		rval = EXT_STATUS_NOT_SUPPORTED;
+		goto done;
+	}
+
+	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+		rval = EXT_STATUS_BUSY;
+		goto done;
+	}
+
+	/* Check if host is online */
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x70a1,
+			"Host is not online\n");
+		rval = EXT_STATUS_DEVICE_OFFLINE;
+		goto done;
+	}
+
+	/* Check if cable is plugged in or not */
+	if (vha->device_flags & DFLG_NO_CABLE) {
+		ql_log(ql_log_warn, vha, 0x70a2,
+			"Cable is unplugged...\n");
+		rval = EXT_STATUS_INVALID_CFG;
+		goto done;
+	}
+
+	/* Check if the switch is connected or not */
+	if (ha->current_topology != ISP_CFG_F) {
+		ql_log(ql_log_warn, vha, 0x70a3,
+			"Host is not connected to the switch\n");
+		rval = EXT_STATUS_INVALID_CFG;
+		goto done;
+	}
+
+	/* Check if operating mode is P2P */
+	if (ha->operating_mode != P2P) {
+		ql_log(ql_log_warn, vha, 0x70a4,
+		    "Host operating mode is not P2p\n");
+		rval = EXT_STATUS_INVALID_CFG;
+		goto done;
+	}
+
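+	/*
+	 * Bidirectional IOCBs are sent to our own port: perform a one-time
+	 * fabric self-login and cache the resulting loop id.
+	 */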
+	mutex_lock(&ha->selflogin_lock);
+	if (vha->self_login_loop_id == 0) {
+		/* Initialize all required fields of fcport */
+		vha->bidir_fcport.vha = vha;
+		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
+		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
+		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
+		vha->bidir_fcport.loop_id = vha->loop_id;
+
+		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
+			ql_log(ql_log_warn, vha, 0x70a7,
+			    "Failed to login port %06X for bidirectional IOCB\n",
+			    vha->bidir_fcport.d_id.b24);
+			mutex_unlock(&ha->selflogin_lock);
+			rval = EXT_STATUS_MAILBOX;
+			goto done;
+		}
+		vha->self_login_loop_id = nextlid - 1;
+	}
+	mutex_unlock(&ha->selflogin_lock);
+
+	/* Assign the cached self-login loop id to the fcport */
+	vha->bidir_fcport.loop_id = vha->self_login_loop_id;
+
+	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		bsg_job->request_payload.sg_list,
+		bsg_job->request_payload.sg_cnt,
+		DMA_TO_DEVICE);
+
+	if (!req_sg_cnt) {
+		rval = EXT_STATUS_NO_MEMORY;
+		goto done;
+	}
+
+	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
+		DMA_FROM_DEVICE);
+
+	if (!rsp_sg_cnt) {
+		rval = EXT_STATUS_NO_MEMORY;
+		goto done_unmap_req_sg;
+	}
+
+	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+		ql_dbg(ql_dbg_user, vha, 0x70a9,
+		    "Dma mapping resulted in different sg counts "
+		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
+		    "%x dma_reply_sg_cnt: %x]\n",
+		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
+		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+		rval = EXT_STATUS_NO_MEMORY;
+		goto done_unmap_sg;
+	}
+
+	req_data_len = bsg_job->request_payload.payload_len;
+	rsp_data_len = bsg_job->reply_payload.payload_len;
+
+	if (req_data_len != rsp_data_len) {
+		rval = EXT_STATUS_BUSY;
+		ql_log(ql_log_warn, vha, 0x70aa,
+		    "req_data_len != rsp_data_len\n");
+		goto done_unmap_sg;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
+	if (!sp) {
+		ql_dbg(ql_dbg_user, vha, 0x70ac,
+		    "Alloc SRB structure failed\n");
+		rval = EXT_STATUS_NO_MEMORY;
+		goto done_unmap_sg;
+	}
+
+	/*Populate srb->ctx with bidir ctx*/
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->type = SRB_BIDI_CMD;
+	sp->done = qla2x00_bsg_job_done;
+
+	/* Add the read and write sg count */
+	tot_dsds = rsp_sg_cnt + req_sg_cnt;
+
+	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
+	if (rval != EXT_STATUS_OK)
+		goto done_free_srb;
+	/* The bsg request will be completed in the interrupt handler */
+	return rval;
+
+done_free_srb:
+	mempool_free(sp, ha->srb_mempool);
+done_unmap_sg:
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+	dma_unmap_sg(&ha->pdev->dev,
+	    bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+done:
+
+	/* Return the vendor-specific error status in the response
+	 * and complete the bsg request.
+	 */
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->reply_payload_rcv_len = 0;
+	bsg_reply->result = (DID_OK) << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	/* Always return success, vendor rsp carries correct status */
+	return 0;
+}
+
+static int
+qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = (DID_ERROR << 16);
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+	srb_t *sp;
+	int req_sg_cnt = 0, rsp_sg_cnt = 0;
+	struct fc_port *fcport;
+	char *type = "FC_BSG_HST_FX_MGMT";
+
+	/* Copy the IOCB specific information */
+	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+	/* Dump the vendor information */
+	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
+	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x70d0,
+		    "Host is not online.\n");
+		rval = -EIO;
+		goto done;
+	}
+
+	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		    bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		if (!req_sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x70c7,
+			    "dma_map_sg return %d for request\n", req_sg_cnt);
+			rval = -ENOMEM;
+			goto done;
+		}
+	}
+
+	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		    bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		if (!rsp_sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x70c8,
+			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+			rval = -ENOMEM;
+			goto done_unmap_req_sg;
+		}
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x70c9,
+	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+
+	/* Allocate a dummy fcport structure, since the functions preparing
+	 * the IOCB and mailbox command retrieve port-specific information
+	 * from the fcport structure. For host-based ELS commands no fcport
+	 * structure is allocated.
+	 */
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_log(ql_log_warn, vha, 0x70ca,
+		    "Failed to allocate fcport.\n");
+		rval = -ENOMEM;
+		goto done_unmap_rsp_sg;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_warn, vha, 0x70cb,
+		    "qla2x00_get_sp failed.\n");
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	/* Initialize all required fields of fcport */
+	fcport->vha = vha;
+	fcport->loop_id = piocb_rqst->dataword;
+
+	sp->type = SRB_FXIOCB_BCMD;
+	sp->name = "bsg_fx_mgmt";
+	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
+
+	ql_dbg(ql_dbg_user, vha, 0x70cc,
+	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
+	    type, piocb_rqst->func_type, fcport->loop_id);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x70cd,
+		    "qla2x00_start_sp failed=%d.\n", rval);
+		mempool_free(sp, ha->srb_mempool);
+		rval = -EIO;
+		goto done_free_fcport;
+	}
+	return rval;
+
+done_free_fcport:
+	kfree(fcport);
+
+done_unmap_rsp_sg:
+	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+		dma_unmap_sg(&ha->pdev->dev,
+		    bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+		dma_unmap_sg(&ha->pdev->dev,
+		    bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+done:
+	return rval;
+}
+
+static int
+qla26xx_serdes_op(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	int rval = 0;
+	struct qla_serdes_reg sr;
+
+	memset(&sr, 0, sizeof(sr));
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
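+	/* Only the read op returns payload data; writes complete with an empty payload. */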
+	switch (sr.cmd) {
+	case INT_SC_SERDES_WRITE_REG:
+		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
+		bsg_reply->reply_payload_rcv_len = 0;
+		break;
+	case INT_SC_SERDES_READ_REG:
+		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+		bsg_reply->reply_payload_rcv_len = sizeof(sr);
+		break;
+	default:
+		ql_dbg(ql_dbg_user, vha, 0x708c,
+		    "Unknown serdes cmd %x.\n", sr.cmd);
+		rval = -EINVAL;
+		break;
+	}
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    rval ? EXT_STATUS_MAILBOX : 0;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
+}
+
+static int
+qla8044_serdes_op(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	int rval = 0;
+	struct qla_serdes_reg_ex sr;
+
+	memset(&sr, 0, sizeof(sr));
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+	switch (sr.cmd) {
+	case INT_SC_SERDES_WRITE_REG:
+		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
+		bsg_reply->reply_payload_rcv_len = 0;
+		break;
+	case INT_SC_SERDES_READ_REG:
+		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+		bsg_reply->reply_payload_rcv_len = sizeof(sr);
+		break;
+	default:
+		ql_dbg(ql_dbg_user, vha, 0x7020,
+		    "Unknown serdes cmd %x.\n", sr.cmd);
+		rval = -EINVAL;
+		break;
+	}
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    rval ? EXT_STATUS_MAILBOX : 0;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
+}
+
+static int
+qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_flash_update_caps cap;
+
+	if (!(IS_QLA27XX(ha)))
+		return -EPERM;
+
+	memset(&cap, 0, sizeof(cap));
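+	/* Pack the four 16-bit firmware attribute words into a single 64-bit capability value. */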
+	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
+			   (uint64_t)ha->fw_attributes_h << 16 |
+			   (uint64_t)ha->fw_attributes;
+
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
+	bsg_reply->reply_payload_rcv_len = sizeof(cap);
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    EXT_STATUS_OK;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
+}
+
+static int
+qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	uint64_t online_fw_attr = 0;
+	struct qla_flash_update_caps cap;
+
+	if (!(IS_QLA27XX(ha)))
+		return -EPERM;
+
+	memset(&cap, 0, sizeof(cap));
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
+
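+	/* Recompute the live capability word; the caller must echo it back unchanged. */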
+	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
+			 (uint64_t)ha->fw_attributes_h << 16 |
+			 (uint64_t)ha->fw_attributes;
+
+	if (online_fw_attr != cap.capabilities) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_INVALID_PARAM;
+		return -EINVAL;
+	}
+
+	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+		    EXT_STATUS_INVALID_PARAM;
+		return -EINVAL;
+	}
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    EXT_STATUS_OK;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
+}
+
+static int
+qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_bbcr_data bbcr;
+	uint16_t loop_id, topo, sw_cap;
+	uint8_t domain, area, al_pa, state;
+	int rval;
+
+	if (!(IS_QLA27XX(ha)))
+		return -EPERM;
+
+	memset(&bbcr, 0, sizeof(bbcr));
+
+	if (vha->flags.bbcr_enable)
+		bbcr.status = QLA_BBCR_STATUS_ENABLED;
+	else
+		bbcr.status = QLA_BBCR_STATUS_DISABLED;
+
+	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
+		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
+			&area, &domain, &topo, &sw_cap);
+		if (rval != QLA_SUCCESS) {
+			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
+			bbcr.state = QLA_BBCR_STATE_OFFLINE;
+			bbcr.mbx1 = loop_id;
+			goto done;
+		}
+
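+		/* bbcr layout: bit 12 = offline flag, bits 11:8 = negotiated BB_SCN, bits 3:0 = configured BB_SCN. */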
+		state = (vha->bbcr >> 12) & 0x1;
+
+		if (state) {
+			bbcr.state = QLA_BBCR_STATE_OFFLINE;
+			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
+		} else {
+			bbcr.state = QLA_BBCR_STATE_ONLINE;
+			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
+		}
+
+		bbcr.configured_bbscn = vha->bbcr & 0xf;
+	}
+
+done:
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
+	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
+
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
+}
+
+static int
+qla2x00_get_priv_stats(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct link_statistics *stats = NULL;
+	dma_addr_t stats_dma;
+	int rval;
+	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
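+	/* The extended stats command carries the options word as its second element. */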
+	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
+
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return -ENODEV;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return -ENODEV;
+
+	if (qla2x00_reset_active(vha))
+		return -EBUSY;
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return -EPERM;
+
+	stats = dma_alloc_coherent(&ha->pdev->dev,
+		sizeof(*stats), &stats_dma, GFP_KERNEL);
+	if (!stats) {
+		ql_log(ql_log_warn, vha, 0x70e2,
+		    "Failed to allocate memory for stats.\n");
+		return -ENOMEM;
+	}
+
+	memset(stats, 0, sizeof(*stats));
+
+	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
+
+	if (rval == QLA_SUCCESS) {
+		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
+		    (uint8_t *)stats, sizeof(*stats));
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
+	}
+
+	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
+
+	bsg_job->reply_len = sizeof(*bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
+		stats, stats_dma);
+
+	return 0;
+}
+
+static int
+qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	scsi_qla_host_t *vha = shost_priv(host);
+	int rval;
+	struct qla_dport_diag *dd;
+
+	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+		return -EPERM;
+
+	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd) {
+		ql_log(ql_log_warn, vha, 0x70db,
+		    "Failed to allocate memory for dport.\n");
+		return -ENOMEM;
+	}
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+	rval = qla26xx_dport_diagnostics(
+	    vha, dd->buf, sizeof(dd->buf), dd->options);
+	if (rval == QLA_SUCCESS) {
+		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+	}
+
+	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
+
+	bsg_job->reply_len = sizeof(*bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	kfree(dd);
+
+	return 0;
+}
+
+static int
+qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+
+	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
+	case QL_VND_LOOPBACK:
+		return qla2x00_process_loopback(bsg_job);
+
+	case QL_VND_A84_RESET:
+		return qla84xx_reset(bsg_job);
+
+	case QL_VND_A84_UPDATE_FW:
+		return qla84xx_updatefw(bsg_job);
+
+	case QL_VND_A84_MGMT_CMD:
+		return qla84xx_mgmt_cmd(bsg_job);
+
+	case QL_VND_IIDMA:
+		return qla24xx_iidma(bsg_job);
+
+	case QL_VND_FCP_PRIO_CFG_CMD:
+		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
+
+	case QL_VND_READ_FLASH:
+		return qla2x00_read_optrom(bsg_job);
+
+	case QL_VND_UPDATE_FLASH:
+		return qla2x00_update_optrom(bsg_job);
+
+	case QL_VND_SET_FRU_VERSION:
+		return qla2x00_update_fru_versions(bsg_job);
+
+	case QL_VND_READ_FRU_STATUS:
+		return qla2x00_read_fru_status(bsg_job);
+
+	case QL_VND_WRITE_FRU_STATUS:
+		return qla2x00_write_fru_status(bsg_job);
+
+	case QL_VND_WRITE_I2C:
+		return qla2x00_write_i2c(bsg_job);
+
+	case QL_VND_READ_I2C:
+		return qla2x00_read_i2c(bsg_job);
+
+	case QL_VND_DIAG_IO_CMD:
+		return qla24xx_process_bidir_cmd(bsg_job);
+
+	case QL_VND_FX00_MGMT_CMD:
+		return qlafx00_mgmt_cmd(bsg_job);
+
+	case QL_VND_SERDES_OP:
+		return qla26xx_serdes_op(bsg_job);
+
+	case QL_VND_SERDES_OP_EX:
+		return qla8044_serdes_op(bsg_job);
+
+	case QL_VND_GET_FLASH_UPDATE_CAPS:
+		return qla27xx_get_flash_upd_cap(bsg_job);
+
+	case QL_VND_SET_FLASH_UPDATE_CAPS:
+		return qla27xx_set_flash_upd_cap(bsg_job);
+
+	case QL_VND_GET_BBCR_DATA:
+		return qla27xx_get_bbcr_data(bsg_job);
+
+	case QL_VND_GET_PRIV_STATS:
+	case QL_VND_GET_PRIV_STATS_EX:
+		return qla2x00_get_priv_stats(bsg_job);
+
+	case QL_VND_DPORT_DIAGNOSTICS:
+		return qla2x00_do_dport_diagnostics(bsg_job);
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+int
+qla24xx_bsg_request(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	int ret = -EINVAL;
+	struct fc_rport *rport;
+	struct Scsi_Host *host;
+	scsi_qla_host_t *vha;
+
+	/* In case no data is transferred. */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+		rport = fc_bsg_to_rport(bsg_job);
+		host = rport_to_shost(rport);
+		vha = shost_priv(host);
+	} else {
+		host = fc_bsg_to_shost(bsg_job);
+		vha = shost_priv(host);
+	}
+
+	if (qla2x00_reset_active(vha)) {
+		ql_dbg(ql_dbg_user, vha, 0x709f,
+		    "BSG: ISP abort active/needed -- cmd=%d.\n",
+		    bsg_request->msgcode);
+		return -EBUSY;
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x7000,
+	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
+
+	switch (bsg_request->msgcode) {
+	case FC_BSG_RPT_ELS:
+	case FC_BSG_HST_ELS_NOLOGIN:
+		ret = qla2x00_process_els(bsg_job);
+		break;
+	case FC_BSG_HST_CT:
+		ret = qla2x00_process_ct(bsg_job);
+		break;
+	case FC_BSG_HST_VENDOR:
+		ret = qla2x00_process_vendor_specific(bsg_job);
+		break;
+	case FC_BSG_HST_ADD_RPORT:
+	case FC_BSG_HST_DEL_RPORT:
+	case FC_BSG_RPT_CT:
+	default:
+		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
+		break;
+	}
+	return ret;
+}
+
+int
+qla24xx_bsg_timeout(struct bsg_job *bsg_job)
+{
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	int cnt, que;
+	unsigned long flags;
+	struct req_que *req;
+
+	/* find the bsg job from the active list of commands */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (que = 0; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			continue;
+
+		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+			sp = req->outstanding_cmds[cnt];
+			if (sp) {
+				if (((sp->type == SRB_CT_CMD) ||
+					(sp->type == SRB_ELS_CMD_HST) ||
+					(sp->type == SRB_FXIOCB_BCMD))
+					&& (sp->u.bsg_job == bsg_job)) {
+					req->outstanding_cmds[cnt] = NULL;
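+					/* Drop the hardware lock across the
+					 * abort, which waits on a mailbox or
+					 * IOCB completion.
+					 */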
+					spin_unlock_irqrestore(&ha->hardware_lock, flags);
+					if (ha->isp_ops->abort_command(sp)) {
+						ql_log(ql_log_warn, vha, 0x7089,
+						    "mbx abort_command "
+						    "failed.\n");
+						scsi_req(bsg_job->req)->result =
+						bsg_reply->result = -EIO;
+					} else {
+						ql_dbg(ql_dbg_user, vha, 0x708a,
+						    "mbx abort_command "
+						    "success.\n");
+						scsi_req(bsg_job->req)->result =
+						bsg_reply->result = 0;
+					}
+					spin_lock_irqsave(&ha->hardware_lock, flags);
+					goto done;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
+	scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
+	return 0;
+
+done:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	sp->free(sp);
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_bsg.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_bsg.h
new file mode 100644
index 0000000..d97dfd5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_bsg.h
@@ -0,0 +1,282 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_BSG_H
+#define __QLA_BSG_H
+
+/* BSG Vendor specific commands */
+#define QL_VND_LOOPBACK		0x01
+#define QL_VND_A84_RESET	0x02
+#define QL_VND_A84_UPDATE_FW	0x03
+#define QL_VND_A84_MGMT_CMD	0x04
+#define QL_VND_IIDMA		0x05
+#define QL_VND_FCP_PRIO_CFG_CMD	0x06
+#define QL_VND_READ_FLASH	0x07
+#define QL_VND_UPDATE_FLASH	0x08
+#define QL_VND_SET_FRU_VERSION	0x0B
+#define QL_VND_READ_FRU_STATUS	0x0C
+#define QL_VND_WRITE_FRU_STATUS	0x0D
+#define QL_VND_DIAG_IO_CMD	0x0A
+#define QL_VND_WRITE_I2C	0x10
+#define QL_VND_READ_I2C		0x11
+#define QL_VND_FX00_MGMT_CMD	0x12
+#define QL_VND_SERDES_OP	0x13
+#define QL_VND_SERDES_OP_EX	0x14
+#define QL_VND_GET_FLASH_UPDATE_CAPS    0x15
+#define QL_VND_SET_FLASH_UPDATE_CAPS    0x16
+#define QL_VND_GET_BBCR_DATA    0x17
+#define QL_VND_GET_PRIV_STATS	0x18
+#define QL_VND_DPORT_DIAGNOSTICS	0x19
+#define QL_VND_GET_PRIV_STATS_EX	0x1A
+
+/* BSG Vendor specific subcode returns */
+#define EXT_STATUS_OK			0
+#define EXT_STATUS_ERR			1
+#define EXT_STATUS_BUSY			2
+#define EXT_STATUS_INVALID_PARAM	6
+#define EXT_STATUS_DATA_OVERRUN		7
+#define EXT_STATUS_DATA_UNDERRUN	8
+#define EXT_STATUS_MAILBOX		11
+#define EXT_STATUS_NO_MEMORY		17
+#define EXT_STATUS_DEVICE_OFFLINE	22
+
+/*
+ * To support bidirectional iocb
+ * BSG Vendor specific returns
+ */
+#define EXT_STATUS_NOT_SUPPORTED	27
+#define EXT_STATUS_INVALID_CFG		28
+#define EXT_STATUS_DMA_ERR		29
+#define EXT_STATUS_TIMEOUT		30
+#define EXT_STATUS_THREAD_FAILED	31
+#define EXT_STATUS_DATA_CMP_FAILED	32
+
+/* BSG definitions for interpreting the CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD         0
+#define INT_DEF_LB_ECHO_CMD             1
+
+/* Loopback-related definitions */
+#define INTERNAL_LOOPBACK		0xF1
+#define EXTERNAL_LOOPBACK		0xF2
+#define ENABLE_INTERNAL_LOOPBACK	0x02
+#define ENABLE_EXTERNAL_LOOPBACK	0x04
+#define INTERNAL_LOOPBACK_MASK		0x000E
+#define MAX_ELS_FRAME_PAYLOAD		252
+#define ELS_OPCODE_BYTE			0x10
+
+/* BSG Vendor specific definitions */
+#define A84_ISSUE_WRITE_TYPE_CMD        0
+#define A84_ISSUE_READ_TYPE_CMD         1
+#define A84_CLEANUP_CMD                 2
+#define A84_ISSUE_RESET_OP_FW           3
+#define A84_ISSUE_RESET_DIAG_FW         4
+#define A84_ISSUE_UPDATE_OPFW_CMD       5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD     6
+
+struct qla84_mgmt_param {
+	union {
+		struct {
+			uint32_t start_addr;
+		} mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+		struct {
+			uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF        1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS   2
+#define QLA84_MGMT_CONFIG_ID_PAUSE      3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS   4
+
+			uint32_t param0;
+			uint32_t param1;
+		} config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+		struct {
+			uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA         1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA                2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT               3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT                4 /* Get LIF Statistics  */
+#define QLA84_MGMT_INFO_ASIC_STAT               5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS           6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG               7 /* Get Panic Log */
+
+			uint32_t context;
+/*
+ * context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+ */
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG                    0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG                    1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG           2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG            3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG     4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG      5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG         6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG          7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG               8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG                      9
+
+/*
+ * context definitions for QLA84_MGMT_INFO_PORT_STAT
+ */
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0   0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1   1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0        2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1        3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0         4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1         5
+
+
+/*
+ * context definitions for QLA84_MGMT_INFO_LIF_STAT
+ */
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0     0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1     1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0           2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1           3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU                6
+
+		} info; /* for QLA84_MGMT_GET_INFO */
+	} u;
+};
+
+struct qla84_msg_mgmt {
+	uint16_t cmd;
+#define QLA84_MGMT_READ_MEM     0x00
+#define QLA84_MGMT_WRITE_MEM    0x01
+#define QLA84_MGMT_CHNG_CONFIG  0x02
+#define QLA84_MGMT_GET_INFO     0x03
+	uint16_t rsrvd;
+	struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+	uint32_t len; /* bytes in payload following this struct */
+	uint8_t payload[0]; /* payload for cmd */
+};
+
+struct qla_bsg_a84_mgmt {
+	struct qla84_msg_mgmt mgmt;
+} __attribute__ ((packed));
+
+struct qla_scsi_addr {
+	uint16_t bus;
+	uint16_t target;
+} __attribute__ ((packed));
+
+struct qla_ext_dest_addr {
+	union {
+		uint8_t wwnn[8];
+		uint8_t wwpn[8];
+		uint8_t id[4];
+		struct qla_scsi_addr scsi_addr;
+	} dest_addr;
+	uint16_t dest_type;
+#define	EXT_DEF_TYPE_WWPN	2
+	uint16_t lun;
+	uint16_t padding[2];
+} __attribute__ ((packed));
+
+struct qla_port_param {
+	struct qla_ext_dest_addr fc_scsi_addr;
+	uint16_t mode;
+	uint16_t speed;
+} __attribute__ ((packed));
+
+
+/* FRU VPD */
+
+#define MAX_FRU_SIZE	36
+
+struct qla_field_address {
+	uint16_t offset;
+	uint16_t device;
+	uint16_t option;
+} __packed;
+
+struct qla_field_info {
+	uint8_t version[MAX_FRU_SIZE];
+} __packed;
+
+struct qla_image_version {
+	struct qla_field_address field_address;
+	struct qla_field_info field_info;
+} __packed;
+
+struct qla_image_version_list {
+	uint32_t count;
+	struct qla_image_version version[0];
+} __packed;
+
+struct qla_status_reg {
+	struct qla_field_address field_address;
+	uint8_t status_reg;
+	uint8_t reserved[7];
+} __packed;
+
+struct qla_i2c_access {
+	uint16_t device;
+	uint16_t offset;
+	uint16_t option;
+	uint16_t length;
+	uint8_t  buffer[0x40];
+} __packed;
+
+/* 26xx serdes register interface */
+
+/* serdes reg commands */
+#define INT_SC_SERDES_READ_REG		1
+#define INT_SC_SERDES_WRITE_REG		2
+
+struct qla_serdes_reg {
+	uint16_t cmd;
+	uint16_t addr;
+	uint16_t val;
+} __packed;
+
+struct qla_serdes_reg_ex {
+	uint16_t cmd;
+	uint32_t addr;
+	uint32_t val;
+} __packed;
+
+struct qla_flash_update_caps {
+	uint64_t  capabilities;
+	uint32_t  outage_duration;
+	uint8_t   reserved[20];
+} __packed;
+
+/* BB_CR Status */
+#define QLA_BBCR_STATUS_DISABLED       0
+#define QLA_BBCR_STATUS_ENABLED        1
+#define QLA_BBCR_STATUS_UNKNOWN        2
+
+/* BB_CR State */
+#define QLA_BBCR_STATE_OFFLINE         0
+#define QLA_BBCR_STATE_ONLINE          1
+
+/* BB_CR Offline Reason Code */
+#define QLA_BBCR_REASON_PORT_SPEED     1
+#define QLA_BBCR_REASON_PEER_PORT      2
+#define QLA_BBCR_REASON_SWITCH         3
+#define QLA_BBCR_REASON_LOGIN_REJECT   4
+
+struct  qla_bbcr_data {
+	uint8_t   status;         /* 1 - enabled, 0 - disabled */
+	uint8_t   state;          /* 1 - online, 0 - offline */
+	uint8_t   configured_bbscn;       /* 0-15 */
+	uint8_t   negotiated_bbscn;       /* 0-15 */
+	uint8_t   offline_reason_code;
+	uint16_t  mbx1;			/* Port state */
+	uint8_t   reserved[9];
+} __packed;
+
+struct qla_dport_diag {
+	uint16_t options;
+	uint32_t buf[16];
+	uint8_t  unused[62];
+} __packed;
+
+/* D_Port options */
+#define QLA_DPORT_RESULT	0x0
+#define QLA_DPORT_START		0x2
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dbg.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dbg.c
new file mode 100644
index 0000000..91e1857
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dbg.c
@@ -0,0 +1,2858 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+/*
+ * Table for showing the current message id in use for particular level
+ * Change this table for addition of log/debug messages.
+ * ----------------------------------------------------------------------
+ * |             Level            |   Last Value Used  |     Holes	|
+ * ----------------------------------------------------------------------
+ * | Module Init and Probe        |       0x0193       | 0x0146         |
+ * |                              |                    | 0x015b-0x0160	|
+ * |                              |                    | 0x016e		|
+ * | Mailbox commands             |       0x1205       | 0x11a2-0x11ff	|
+ * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
+ * |				  | 		       | 0x211a         |
+ * |                              |                    | 0x211c-0x2128  |
+ * |                              |                    | 0x212a-0x2130  |
+ * | Queue Command and IO tracing |       0x3074       | 0x300b         |
+ * |                              |                    | 0x3027-0x3028  |
+ * |                              |                    | 0x303d-0x3041  |
+ * |                              |                    | 0x302d,0x3033  |
+ * |                              |                    | 0x3036,0x3038  |
+ * |                              |                    | 0x303a		|
+ * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
+ * | Async Events                 |       0x5090       | 0x502b-0x502f  |
+ * |				  | 		       | 0x5047         |
+ * |                              |                    | 0x5084,0x5075	|
+ * |                              |                    | 0x503d,0x5044  |
+ * |                              |                    | 0x505f		|
+ * | Timer Routines               |       0x6012       |                |
+ * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
+ * |				  |		       | 0x7020,0x7024  |
+ * |                              |                    | 0x7039,0x7045  |
+ * |                              |                    | 0x7073-0x7075  |
+ * |                              |                    | 0x70a5-0x70a6  |
+ * |                              |                    | 0x70a8,0x70ab  |
+ * |                              |                    | 0x70ad-0x70ae  |
+ * |                              |                    | 0x70d0-0x70d6	|
+ * |                              |                    | 0x70d7-0x70db  |
+ * | Task Management              |       0x8042       | 0x8000         |
+ * |                              |                    | 0x8019         |
+ * |                              |                    | 0x8025,0x8026  |
+ * |                              |                    | 0x8031,0x8032  |
+ * |                              |                    | 0x8039,0x803c  |
+ * | AER/EEH                      |       0x9011       |		|
+ * | Virtual Port                 |       0xa007       |		|
+ * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
+ * |                              |                    | 0xb09e,0xb0ae  |
+ * |				  |		       | 0xb0c3,0xb0c6  |
+ * |                              |                    | 0xb0e0-0xb0ef  |
+ * |                              |                    | 0xb085,0xb0dc  |
+ * |                              |                    | 0xb107,0xb108  |
+ * |                              |                    | 0xb111,0xb11e  |
+ * |                              |                    | 0xb12c,0xb12d  |
+ * |                              |                    | 0xb13a,0xb142  |
+ * |                              |                    | 0xb13c-0xb140  |
+ * |                              |                    | 0xb149		|
+ * | MultiQ                       |       0xc010       |		|
+ * | Misc                         |       0xd302       | 0xd031-0xd0ff	|
+ * |                              |                    | 0xd101-0xd1fe	|
+ * |                              |                    | 0xd214-0xd2fe	|
+ * | Target Mode		  |	  0xe081       |		|
+ * | Target Mode Management	  |	  0xf09b       | 0xf002		|
+ * |                              |                    | 0xf046-0xf049  |
+ * | Target Mode Task Management  |	  0x1000d      |		|
+ * ----------------------------------------------------------------------
+ */
+
+#include "qla_def.h"
+
+#include <linux/delay.h>
+
+static uint32_t ql_dbg_offset = 0x800;
+
+static inline void
+qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
+{
+	fw_dump->fw_major_version = htonl(ha->fw_major_version);
+	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
+	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
+	fw_dump->fw_attributes = htonl(ha->fw_attributes);
+
+	fw_dump->vendor = htonl(ha->pdev->vendor);
+	fw_dump->device = htonl(ha->pdev->device);
+	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
+	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
+}
+
+static inline void *
+qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
+{
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+	/* Request queue. */
+	memcpy(ptr, req->ring, req->length *
+	    sizeof(request_t));
+
+	/* Response queue. */
+	ptr += req->length * sizeof(request_t);
+	memcpy(ptr, rsp->ring, rsp->length *
+	    sizeof(response_t));
+
+	return ptr + (rsp->length * sizeof(response_t));
+}
+
+int
+qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+	uint32_t ram_dwords, void **nxt)
+{
+	int rval;
+	uint32_t cnt, stat, timer, dwords, idx;
+	uint16_t mb0;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	dma_addr_t dump_dma = ha->gid_list_dma;
+	uint32_t *dump = (uint32_t *)ha->gid_list;
+
+	rval = QLA_SUCCESS;
+	mb0 = 0;
+
+	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
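+	/* Read the MPI RAM in chunks, reusing the gid_list buffer as the DMA target. */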
+	dwords = qla2x00_gid_list_size(ha) / 4;
+	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+	    cnt += dwords, addr += dwords) {
+		if (cnt + dwords > ram_dwords)
+			dwords = ram_dwords - cnt;
+
+		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+
+		WRT_REG_WORD(&reg->mailbox9, 0);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+		ha->flags.mbox_int = 0;
+		for (timer = 6000000; timer; timer--) {
+			/* Check for pending interrupts. */
+			stat = RD_REG_DWORD(&reg->host_status);
+			if (stat & HSRX_RISC_INT) {
+				stat &= 0xff;
+
+				if (stat == 0x1 || stat == 0x2 ||
+				    stat == 0x10 || stat == 0x11) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_REG_WORD(&reg->mailbox0);
+					RD_REG_WORD(&reg->mailbox1);
+
+					WRT_REG_DWORD(&reg->hccr,
+					    HCCRX_CLR_RISC_INT);
+					RD_REG_DWORD(&reg->hccr);
+					break;
+				}
+
+				/* Clear this intr; it wasn't a mailbox intr */
+				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+				RD_REG_DWORD(&reg->hccr);
+			}
+			udelay(5);
+		}
+		ha->flags.mbox_int = 1;
+
+		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			rval = mb0 & MBS_MASK;
+			for (idx = 0; idx < dwords; idx++)
+				ram[cnt + idx] = IS_QLA27XX(ha) ?
+				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+		} else {
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+	return rval;
+}
+
+int
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+    uint32_t ram_dwords, void **nxt)
+{
+	int rval;
+	uint32_t cnt, stat, timer, dwords, idx;
+	uint16_t mb0;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	dma_addr_t dump_dma = ha->gid_list_dma;
+	uint32_t *dump = (uint32_t *)ha->gid_list;
+
+	rval = QLA_SUCCESS;
+	mb0 = 0;
+
+	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
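+	/* Read RISC RAM in gid_list-sized chunks via the shared DMA buffer. */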
+	dwords = qla2x00_gid_list_size(ha) / 4;
+	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+	    cnt += dwords, addr += dwords) {
+		if (cnt + dwords > ram_dwords)
+			dwords = ram_dwords - cnt;
+
+		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+
+		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+
+		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+		ha->flags.mbox_int = 0;
+		for (timer = 6000000; timer; timer--) {
+			/* Check for pending interrupts. */
+			stat = RD_REG_DWORD(&reg->host_status);
+			if (stat & HSRX_RISC_INT) {
+				stat &= 0xff;
+
+				if (stat == 0x1 || stat == 0x2 ||
+				    stat == 0x10 || stat == 0x11) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_REG_WORD(&reg->mailbox0);
+
+					WRT_REG_DWORD(&reg->hccr,
+					    HCCRX_CLR_RISC_INT);
+					RD_REG_DWORD(&reg->hccr);
+					break;
+				}
+
+				/* Clear this intr; it wasn't a mailbox intr */
+				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+				RD_REG_DWORD(&reg->hccr);
+			}
+			udelay(5);
+		}
+		ha->flags.mbox_int = 1;
+
+		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			rval = mb0 & MBS_MASK;
+			for (idx = 0; idx < dwords; idx++)
+				ram[cnt + idx] = IS_QLA27XX(ha) ?
+				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
+		} else {
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+	return rval;
+}
+
+static int
+qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
+    uint32_t cram_size, void **nxt)
+{
+	int rval;
+
+	/* Code RAM. */
+	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+	/* External Memory. */
+	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
+	    ha->fw_memory_size - 0x100000 + 1, nxt);
+	if (rval == QLA_SUCCESS)
+		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+	return rval;
+}
+
+static uint32_t *
+qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
+    uint32_t count, uint32_t *buf)
+{
+	uint32_t __iomem *dmp_reg;
+
+	WRT_REG_DWORD(&reg->iobase_addr, iobase);
+	dmp_reg = &reg->iobase_window;
+	for ( ; count--; dmp_reg++)
+		*buf++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	return buf;
+}
+
+void
+qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
+{
+	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+
+	/* A 100 usec delay is sufficient for the hardware to pause the RISC */
+	udelay(100);
+	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
+}
+
+int
+qla24xx_soft_reset(struct qla_hw_data *ha)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t cnt;
+	uint16_t wd;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	/*
+	 * Reset RISC. The delay is dependent on system architecture.
+	 * Driver can proceed with the reset sequence after waiting
+	 * for a timeout period.
+	 */
+	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+	for (cnt = 0; cnt < 30000; cnt++) {
+		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+			break;
+
+		udelay(10);
+	}
+	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
+
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+	udelay(100);
+
+	/* Wait for soft-reset to complete. */
+	for (cnt = 0; cnt < 30000; cnt++) {
+		if ((RD_REG_DWORD(&reg->ctrl_status) &
+		    CSRX_ISP_SOFT_RESET) == 0)
+			break;
+
+		udelay(10);
+	}
+	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
+
+	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+	RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */
+
+	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt)
+			udelay(10);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+	if (rval == QLA_SUCCESS)
+		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+
+	return rval;
+}
+
+static int
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+    uint32_t ram_words, void **nxt)
+{
+	int rval;
+	uint32_t cnt, stat, timer, words, idx;
+	uint16_t mb0;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	dma_addr_t dump_dma = ha->gid_list_dma;
+	uint16_t *dump = (uint16_t *)ha->gid_list;
+
+	rval = QLA_SUCCESS;
+	mb0 = 0;
+
+	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
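+	/* Same chunked dump scheme as the 24xx path, but in 16-bit words. */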
+	words = qla2x00_gid_list_size(ha) / 2;
+	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
+	    cnt += words, addr += words) {
+		if (cnt + words > ram_words)
+			words = ram_words - cnt;
+
+		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
+		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
+
+		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
+		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
+		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
+		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
+
+		WRT_MAILBOX_REG(ha, reg, 4, words);
+		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+		for (timer = 6000000; timer; timer--) {
+			/* Check for pending interrupts. */
+			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+			if (stat & HSR_RISC_INT) {
+				stat &= 0xff;
+
+				if (stat == 0x1 || stat == 0x2) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+					/* Release mailbox registers. */
+					WRT_REG_WORD(&reg->semaphore, 0);
+					WRT_REG_WORD(&reg->hccr,
+					    HCCR_CLR_RISC_INT);
+					RD_REG_WORD(&reg->hccr);
+					break;
+				} else if (stat == 0x10 || stat == 0x11) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+					WRT_REG_WORD(&reg->hccr,
+					    HCCR_CLR_RISC_INT);
+					RD_REG_WORD(&reg->hccr);
+					break;
+				}
+
+				/* clear this intr; it wasn't a mailbox intr */
+				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+				RD_REG_WORD(&reg->hccr);
+			}
+			udelay(5);
+		}
+
+		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			rval = mb0 & MBS_MASK;
+			for (idx = 0; idx < words; idx++)
+				ram[cnt + idx] = swab16(dump[idx]);
+		} else {
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
+	return rval;
+}
+
+static inline void
+qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
+    uint16_t *buf)
+{
+	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
+
+	for ( ; count--; dmp_reg++)
+		*buf++ = htons(RD_REG_WORD(dmp_reg));
+}
+
+static inline void *
+qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
+{
+	if (!ha->eft)
+		return ptr;
+
+	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
+	return ptr + ntohl(ha->fw_dump->eft_size);
+}
+
+static inline void *
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	uint32_t cnt;
+	uint32_t *iter_reg;
+	struct qla2xxx_fce_chain *fcec = ptr;
+
+	if (!ha->fce)
+		return ptr;
+
+	*last_chain = &fcec->type;
+	fcec->type = htonl(DUMP_CHAIN_FCE);
+	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
+	    fce_calc_size(ha->fce_bufs));
+	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
+	fcec->addr_l = htonl(LSD(ha->fce_dma));
+	fcec->addr_h = htonl(MSD(ha->fce_dma));
+
+	iter_reg = fcec->eregs;
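+	/* Record the eight FCE mailbox values ahead of the trace buffer contents. */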
+	for (cnt = 0; cnt < 8; cnt++)
+		*iter_reg++ = htonl(ha->fce_mb[cnt]);
+
+	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
+
+	return (char *)iter_reg + ntohl(fcec->size);
+}
+
+static inline void *
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	struct qla2xxx_offld_chain *c = ptr;
+
+	if (!ha->exlogin_buf)
+		return ptr;
+
+	*last_chain = &c->type;
+
+	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
+	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
+	    ha->exlogin_size);
+	c->size = cpu_to_be32(ha->exlogin_size);
+	c->addr = cpu_to_be64(ha->exlogin_buf_dma);
+
+	ptr += sizeof(struct qla2xxx_offld_chain);
+	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
+
+	return (char *)ptr + cpu_to_be32(c->size);
+}
+
+static inline void *
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	struct qla2xxx_offld_chain *c = ptr;
+
+	if (!ha->exchoffld_buf)
+		return ptr;
+
+	*last_chain = &c->type;
+
+	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
+	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
+	    ha->exchoffld_size);
+	c->size = cpu_to_be32(ha->exchoffld_size);
+	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
+
+	ptr += sizeof(struct qla2xxx_offld_chain);
+	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
+
+	return (char *)ptr + cpu_to_be32(c->size);
+}
+
+static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+	uint32_t **last_chain)
+{
+	struct qla2xxx_mqueue_chain *q;
+	struct qla2xxx_mqueue_header *qh;
+	uint32_t num_queues;
+	int que;
+	struct {
+		int length;
+		void *ring;
+	} aq, *aqp;
+
+	if (!ha->tgt.atio_ring)
+		return ptr;
+
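+	/* Only a single ATIO queue exists; wrap it in a local descriptor so the copy loop mirrors the multi-queue helpers. */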
+	num_queues = 1;
+	aqp = &aq;
+	aqp->length = ha->tgt.atio_q_length;
+	aqp->ring = ha->tgt.atio_ring;
+
+	for (que = 0; que < num_queues; que++) {
+		/* aqp = ha->atio_q_map[que]; */
+		q = ptr;
+		*last_chain = &q->type;
+		q->type = htonl(DUMP_CHAIN_QUEUE);
+		q->chain_size = htonl(
+		    sizeof(struct qla2xxx_mqueue_chain) +
+		    sizeof(struct qla2xxx_mqueue_header) +
+		    (aqp->length * sizeof(request_t)));
+		ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+		/* Add header. */
+		qh = ptr;
+		qh->queue = htonl(TYPE_ATIO_QUEUE);
+		qh->number = htonl(que);
+		qh->size = htonl(aqp->length * sizeof(request_t));
+		ptr += sizeof(struct qla2xxx_mqueue_header);
+
+		/* Add data. */
+		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+		ptr += aqp->length * sizeof(request_t);
+	}
+
+	return ptr;
+}
+
+static inline void *
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	struct qla2xxx_mqueue_chain *q;
+	struct qla2xxx_mqueue_header *qh;
+	struct req_que *req;
+	struct rsp_que *rsp;
+	int que;
+
+	if (!ha->mqenable)
+		return ptr;
+
+	/* Request queues */
+	for (que = 1; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			break;
+
+		/* Add chain. */
+		q = ptr;
+		*last_chain = &q->type;
+		q->type = htonl(DUMP_CHAIN_QUEUE);
+		q->chain_size = htonl(
+		    sizeof(struct qla2xxx_mqueue_chain) +
+		    sizeof(struct qla2xxx_mqueue_header) +
+		    (req->length * sizeof(request_t)));
+		ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+		/* Add header. */
+		qh = ptr;
+		qh->queue = htonl(TYPE_REQUEST_QUEUE);
+		qh->number = htonl(que);
+		qh->size = htonl(req->length * sizeof(request_t));
+		ptr += sizeof(struct qla2xxx_mqueue_header);
+
+		/* Add data. */
+		memcpy(ptr, req->ring, req->length * sizeof(request_t));
+		ptr += req->length * sizeof(request_t);
+	}
+
+	/* Response queues */
+	for (que = 1; que < ha->max_rsp_queues; que++) {
+		rsp = ha->rsp_q_map[que];
+		if (!rsp)
+			break;
+
+		/* Add chain. */
+		q = ptr;
+		*last_chain = &q->type;
+		q->type = htonl(DUMP_CHAIN_QUEUE);
+		q->chain_size = htonl(
+		    sizeof(struct qla2xxx_mqueue_chain) +
+		    sizeof(struct qla2xxx_mqueue_header) +
+		    (rsp->length * sizeof(response_t)));
+		ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+		/* Add header. */
+		qh = ptr;
+		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
+		qh->number = htonl(que);
+		qh->size = htonl(rsp->length * sizeof(response_t));
+		ptr += sizeof(struct qla2xxx_mqueue_header);
+
+		/* Add data. */
+		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
+		ptr += rsp->length * sizeof(response_t);
+	}
+
+	return ptr;
+}
+
+static inline void *
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+	uint32_t cnt, que_idx;
+	uint8_t que_cnt;
+	struct qla2xxx_mq_chain *mq = ptr;
+	device_reg_t *reg;
+
+	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		return ptr;
+
+	mq = ptr;
+	*last_chain = &mq->type;
+	mq->type = htonl(DUMP_CHAIN_MQ);
+	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
+
+	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
+		ha->max_req_queues : ha->max_rsp_queues;
+	mq->count = htonl(que_cnt);
+	for (cnt = 0; cnt < que_cnt; cnt++) {
+		reg = ISP_QUE_REG(ha, cnt);
+		que_idx = cnt * 4;
+		mq->qregs[que_idx] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+		mq->qregs[que_idx+1] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+		mq->qregs[que_idx+2] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+		mq->qregs[que_idx+3] =
+		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
+	}
+
+	return ptr + sizeof(struct qla2xxx_mq_chain);
+}
+
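+/*
+ * Common post-dump bookkeeping: record whether the dump succeeded and, on
+ * success, queue a QLA_UEVENT_CODE_FW_DUMP uevent so user space knows a
+ * new firmware dump is available.
+ */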
+void
+qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xd000,
+		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
+		    rval, ha->fw_dump_cap_flags);
+		ha->fw_dumped = 0;
+	} else {
+		ql_log(ql_log_info, vha, 0xd001,
+		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
+		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
+		ha->fw_dumped = 1;
+		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+	}
+}
+
+/**
+ * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
+ * @vha: HA context
+ * @hardware_locked: set when called with the hardware_lock held
+ */
+void
+qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint16_t __iomem *dmp_reg;
+	unsigned long	flags;
+	struct qla2300_fw_dump	*fw;
+	void		*nxt;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	flags = 0;
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd002,
+		    "No buffer available for dump.\n");
+		goto qla2300_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd003,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n",
+		    ha->fw_dump);
+		goto qla2300_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp23;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	rval = QLA_SUCCESS;
+	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+
+	/* Pause RISC. */
+	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
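+	/*
+	 * On an ISP2300, poll for the pause to take effect (up to 30000
+	 * iterations of 100us, roughly three seconds) before giving up
+	 * with QLA_FUNCTION_TIMEOUT; other parts only need a PCI-posting
+	 * read and a short delay.
+	 */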
+	if (IS_QLA2300(ha)) {
+		for (cnt = 30000;
+		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+			rval == QLA_SUCCESS; cnt--) {
+			if (cnt)
+				udelay(100);
+			else
+				rval = QLA_FUNCTION_TIMEOUT;
+		}
+	} else {
+		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+		udelay(10);
+	}
+
+	if (rval == QLA_SUCCESS) {
+		dmp_reg = &reg->flash_address;
+		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		dmp_reg = &reg->u.isp2300.req_q_in;
+		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+		    cnt++, dmp_reg++)
+			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		dmp_reg = &reg->u.isp2300.mailbox0;
+		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+		    cnt++, dmp_reg++)
+			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x40);
+		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x50);
+		qla2xxx_read_window(reg, 48, fw->dma_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x00);
+		dmp_reg = &reg->risc_hw;
+		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+		    cnt++, dmp_reg++)
+			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		WRT_REG_WORD(&reg->pcr, 0x2000);
+		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2200);
+		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2400);
+		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2600);
+		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2800);
+		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2A00);
+		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2C00);
+		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2E00);
+		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x10);
+		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x20);
+		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x30);
+		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
+
+		/* Reset RISC. */
+		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+		for (cnt = 0; cnt < 30000; cnt++) {
+			if ((RD_REG_WORD(&reg->ctrl_status) &
+			    CSR_ISP_SOFT_RESET) == 0)
+				break;
+
+			udelay(10);
+		}
+	}
+
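+	/*
+	 * On non-ISP2300 parts, wait for mailbox register 0 to clear
+	 * after the soft reset before reading back RISC SRAM.
+	 */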
+	if (!IS_QLA2300(ha)) {
+		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
+		    rval == QLA_SUCCESS; cnt--) {
+			if (cnt)
+				udelay(100);
+			else
+				rval = QLA_FUNCTION_TIMEOUT;
+		}
+	}
+
+	/* Get RISC SRAM. */
+	if (rval == QLA_SUCCESS)
+		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
+		    sizeof(fw->risc_ram) / 2, &nxt);
+
+	/* Get stack SRAM. */
+	if (rval == QLA_SUCCESS)
+		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
+		    sizeof(fw->stack_ram) / 2, &nxt);
+
+	/* Get data SRAM. */
+	if (rval == QLA_SUCCESS)
+		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
+		    ha->fw_memory_size - 0x11000 + 1, &nxt);
+
+	if (rval == QLA_SUCCESS)
+		qla2xxx_copy_queues(ha, nxt);
+
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla2300_fw_dump_failed:
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
+}
+
+/**
+ * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
+ * @vha: HA context
+ * @hardware_locked: set when called with the hardware_lock held
+ */
+void
+qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt, timer;
+	uint16_t	risc_address;
+	uint16_t	mb0, mb2;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint16_t __iomem *dmp_reg;
+	unsigned long	flags;
+	struct qla2100_fw_dump	*fw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	risc_address = 0;
+	mb0 = mb2 = 0;
+	flags = 0;
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd004,
+		    "No buffer available for dump.\n");
+		goto qla2100_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd005,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n",
+		    ha->fw_dump);
+		goto qla2100_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp21;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	rval = QLA_SUCCESS;
+	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+
+	/* Pause RISC. */
+	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt)
+			udelay(100);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+	if (rval == QLA_SUCCESS) {
+		dmp_reg = &reg->flash_address;
+		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		dmp_reg = &reg->u.isp2100.mailbox0;
+		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
+			if (cnt == 8)
+				dmp_reg = &reg->u_end.isp2200.mailbox8;
+
+			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+		}
+
+		dmp_reg = &reg->u.isp2100.unused_2[0];
+		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
+			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x00);
+		dmp_reg = &reg->risc_hw;
+		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
+			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+
+		WRT_REG_WORD(&reg->pcr, 0x2000);
+		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2100);
+		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2200);
+		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2300);
+		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2400);
+		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2500);
+		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2600);
+		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
+
+		WRT_REG_WORD(&reg->pcr, 0x2700);
+		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x10);
+		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x20);
+		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
+
+		WRT_REG_WORD(&reg->ctrl_status, 0x30);
+		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
+
+		/* Reset the ISP. */
+		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+	}
+
+	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt)
+			udelay(100);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+
+	/* Pause RISC. */
+	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
+	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
+
+		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+		for (cnt = 30000;
+		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+		    rval == QLA_SUCCESS; cnt--) {
+			if (cnt)
+				udelay(100);
+			else
+				rval = QLA_FUNCTION_TIMEOUT;
+		}
+		if (rval == QLA_SUCCESS) {
+			/* Set memory configuration and timing. */
+			if (IS_QLA2100(ha))
+				WRT_REG_WORD(&reg->mctr, 0xf1);
+			else
+				WRT_REG_WORD(&reg->mctr, 0xf2);
+			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */
+
+			/* Release RISC. */
+			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+		}
+	}
+
+	if (rval == QLA_SUCCESS) {
+		/* Get RISC SRAM. */
+		risc_address = 0x1000;
+		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
+		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+	}
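+	/*
+	 * Read RISC SRAM one word at a time: mailbox 1 carries the address,
+	 * HCCR_SET_HOST_INT issues the MBC_READ_RAM_WORD command, and the
+	 * poll below waits up to 6000000 * 5us (about 30 seconds) for the
+	 * completion interrupt that returns the data in mailbox 2.
+	 */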
+	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
+	    cnt++, risc_address++) {
+		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
+		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+		for (timer = 6000000; timer != 0; timer--) {
+			/* Check for pending interrupts. */
+			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
+				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+					set_bit(MBX_INTERRUPT,
+					    &ha->mbx_cmd_flags);
+
+					mb0 = RD_MAILBOX_REG(ha, reg, 0);
+					mb2 = RD_MAILBOX_REG(ha, reg, 2);
+
+					WRT_REG_WORD(&reg->semaphore, 0);
+					WRT_REG_WORD(&reg->hccr,
+					    HCCR_CLR_RISC_INT);
+					RD_REG_WORD(&reg->hccr);
+					break;
+				}
+				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+				RD_REG_WORD(&reg->hccr);
+			}
+			udelay(5);
+		}
+
+		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+			rval = mb0 & MBS_MASK;
+			fw->risc_ram[cnt] = htons(mb2);
+		} else {
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	if (rval == QLA_SUCCESS)
+		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
+
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla2100_fw_dump_failed:
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
+}
+
+void
+qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t	*iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long	flags;
+	struct qla24xx_fw_dump *fw;
+	void		*nxt;
+	void		*nxt_chain;
+	uint32_t	*last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	if (IS_P3P_TYPE(ha))
+		return;
+
+	flags = 0;
+	ha->fw_dump_cap_flags = 0;
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd006,
+		    "No buffer available for dump.\n");
+		goto qla24xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd007,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n",
+		    ha->fw_dump);
+		goto qla24xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp24;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach in case the pause times out.
+	 */
+	qla24xx_pause_risc(reg, ha);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
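+	/*
+	 * Each 0xB0n00000 write to iobase_select picks shadow register n;
+	 * its value is then read back through iobase_sdata.
+	 */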
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+	/* Command DMA registers. */
+	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+	/* Queues. */
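+	/*
+	 * Each queue bank below is eight window dwords followed by the
+	 * seven queue-pointer shadow dwords read back through iobase_q.
+	 */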
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS)
+		goto qla24xx_fw_dump_failed_0;
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla24xx_fw_dump_failed_0;
+
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	qla24xx_copy_eft(ha, nxt);
+
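+	/*
+	 * Chain records begin at chain_offset.  If any were emitted, mark
+	 * the dump header with DUMP_CHAIN_VARIANT so decoders know to walk
+	 * the chain, whose final record carries DUMP_CHAIN_LAST.
+	 */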
+	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
+	}
+
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla24xx_fw_dump_failed_0:
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla24xx_fw_dump_failed:
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
+}
+
+void
+qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t	*iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long	flags;
+	struct qla25xx_fw_dump *fw;
+	void		*nxt, *nxt_chain;
+	uint32_t	*last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	flags = 0;
+	ha->fw_dump_cap_flags = 0;
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd008,
+		    "No buffer available for dump.\n");
+		goto qla25xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd009,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n",
+		    ha->fw_dump);
+		goto qla25xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp25;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+	ha->fw_dump->version = htonl(2);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach in case the pause times out.
+	 */
+	qla24xx_pause_risc(reg, ha);
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+
+	/* PCIe registers. */
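+	/*
+	 * Select window 0x7C00/0x01 and capture three PCIe core dwords
+	 * starting at iobase_c4, plus the window register itself, before
+	 * restoring the window to 0.
+	 */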
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	WRT_REG_DWORD(&reg->iobase_window, 0x00);
+	RD_REG_DWORD(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+	/* Auxiliary sequence registers. */
+	iter_reg = fw->aseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+
+	iter_reg = fw->aseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+
+	/* Command DMA registers. */
+	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+	/* Queues. */
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS)
+		goto qla25xx_fw_dump_failed_0;
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla25xx_fw_dump_failed_0;
+
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- the chain was started above with the MQ registers. */
+	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
+	}
+
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla25xx_fw_dump_failed_0:
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla25xx_fw_dump_failed:
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
+}
+
+void
+qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t	*iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long	flags;
+	struct qla81xx_fw_dump *fw;
+	void		*nxt, *nxt_chain;
+	uint32_t	*last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	flags = 0;
+	ha->fw_dump_cap_flags = 0;
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd00a,
+		    "No buffer available for dump.\n");
+		goto qla81xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd00b,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n",
+		    ha->fw_dump);
+		goto qla81xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp81;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach in case the pause times out.
+	 */
+	qla24xx_pause_risc(reg, ha);
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+
+	/* PCIe registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	WRT_REG_DWORD(&reg->iobase_window, 0x00);
+	RD_REG_DWORD(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+
+	/* Auxiliary sequence registers. */
+	iter_reg = fw->aseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+
+	iter_reg = fw->aseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+
+	/* Command DMA registers. */
+	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
+
+	/* Queues. */
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla81xx_fw_dump_failed_0;
+
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- the chain was started above with the MQ registers. */
+	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
+	}
+
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla81xx_fw_dump_failed_0:
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla81xx_fw_dump_failed:
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
+}
+
+void
+qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	int		rval;
+	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t __iomem *dmp_reg;
+	uint32_t	*iter_reg;
+	uint16_t __iomem *mbx_reg;
+	unsigned long	flags;
+	struct qla83xx_fw_dump *fw;
+	void		*nxt, *nxt_chain;
+	uint32_t	*last_chain = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	flags = 0;
+	ha->fw_dump_cap_flags = 0;
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
+
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0xd00c,
+		    "No buffer available for dump!!!\n");
+		goto qla83xx_fw_dump_failed;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xd00d,
+		    "Firmware has been previously dumped (%p) -- ignoring "
+		    "request...\n", ha->fw_dump);
+		goto qla83xx_fw_dump_failed;
+	}
+	fw = &ha->fw_dump->isp.isp83;
+	qla2xxx_prep_dump(ha, ha->fw_dump);
+
+	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+	/*
+	 * Pause RISC. No need to track timeout, as resetting the chip
+	 * is the right approach in case the pause times out.
+	 */
+	qla24xx_pause_risc(reg, ha);
+
+	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+	dmp_reg = &reg->iobase_window;
+	RD_REG_DWORD(dmp_reg);
+	WRT_REG_DWORD(dmp_reg, 0);
+
+	dmp_reg = &reg->unused_4_1[0];
+	RD_REG_DWORD(dmp_reg);
+	WRT_REG_DWORD(dmp_reg, 0);
+
+	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+	dmp_reg = &reg->unused_4_1[2];
+	RD_REG_DWORD(dmp_reg);
+	WRT_REG_DWORD(dmp_reg, 0);
+
+	/* Select the PCR and disable ECC checking and correction. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */
+
+	/* Host/Risc registers. */
+	iter_reg = fw->host_risc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
+
+	/* PCIe registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x01);
+	dmp_reg = &reg->iobase_c4;
+	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+	dmp_reg++;
+	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
+	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	WRT_REG_DWORD(&reg->iobase_window, 0x00);
+	RD_REG_DWORD(&reg->iobase_window);
+
+	/* Host interface registers. */
+	dmp_reg = &reg->flash_addr;
+	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Disable interrupts. */
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+
+	/* Shadow registers. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+	/* RISC I/O register. */
+	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+	/* Mailbox registers. */
+	mbx_reg = &reg->mailbox0;
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+
+	/* Transfer sequence registers. */
+	iter_reg = fw->xseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
+
+	iter_reg = fw->xseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
+
+	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
+
+	/* Receive sequence registers. */
+	iter_reg = fw->rseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
+
+	iter_reg = fw->rseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
+	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
+	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
+
+	/* Auxiliary sequence registers. */
+	iter_reg = fw->aseq_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB170, 16, iter_reg);
+
+	iter_reg = fw->aseq_0_reg;
+	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
+	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
+	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
+	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
+
+	/* Command DMA registers. */
+	iter_reg = fw->cmd_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
+	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
+
+	/* Queues. */
+	iter_reg = fw->req0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->resp0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	iter_reg = fw->req1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
+	dmp_reg = &reg->iobase_q;
+	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+
+	/* Transmit DMA registers. */
+	iter_reg = fw->xmt0_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
+
+	iter_reg = fw->xmt1_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
+
+	iter_reg = fw->xmt2_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
+
+	iter_reg = fw->xmt3_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
+
+	iter_reg = fw->xmt4_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
+
+	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
+
+	/* Receive DMA registers. */
+	iter_reg = fw->rcvt0_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
+
+	iter_reg = fw->rcvt1_data_dma_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
+	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
+
+	/* RISC registers. */
+	iter_reg = fw->risc_gp_reg;
+	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
+	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
+
+	/* Local memory controller registers. */
+	iter_reg = fw->lmc_reg;
+	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
+	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
+
+	/* Fibre Protocol Module registers. */
+	iter_reg = fw->fpm_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
+
+	/* RQ0 Array registers. */
+	iter_reg = fw->rq0_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
+
+	/* RQ1 Array registers. */
+	iter_reg = fw->rq1_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
+
+	/* RP0 Array registers. */
+	iter_reg = fw->rp0_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
+
+	/* RP1 Array registers. */
+	iter_reg = fw->rp1_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
+
+	iter_reg = fw->at0_array_reg;
+	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
+
+	/* I/O Queue Control registers. */
+	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
+
+	/* Frame Buffer registers. */
+	iter_reg = fw->fb_hdw_reg;
+	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
+	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
+	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
+
+	/* Multi queue registers */
+	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
+	    &last_chain);
+
+	rval = qla24xx_soft_reset(ha);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xd00e,
+		    "SOFT RESET FAILED, forcing continuation of dump!!!\n");
+		rval = QLA_SUCCESS;
+
+		ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
+
+		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+		RD_REG_DWORD(&reg->hccr);
+
+		WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+		RD_REG_DWORD(&reg->hccr);
+
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+		RD_REG_DWORD(&reg->hccr);
+
+		for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+			udelay(5);
+
+		if (!cnt) {
+			nxt = fw->code_ram;
+			nxt += sizeof(fw->code_ram);
+			nxt += (ha->fw_memory_size - 0x100000 + 1);
+			goto copy_queue;
+		} else {
+			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+			ql_log(ql_log_warn, vha, 0xd010,
+			    "bigger hammer success?\n");
+		}
+	}
+
+	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
+	    &nxt);
+	if (rval != QLA_SUCCESS)
+		goto qla83xx_fw_dump_failed_0;
+
+copy_queue:
+	nxt = qla2xxx_copy_queues(ha, nxt);
+
+	qla24xx_copy_eft(ha, nxt);
+
+	/* Chain entries -- started with MQ. */
+	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
+	if (last_chain) {
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
+	}
+
+	/* Adjust valid length. */
+	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
+qla83xx_fw_dump_failed_0:
+	qla2xxx_dump_post_process(base_vha, rval);
+
+qla83xx_fw_dump_failed:
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
+}
+
+/****************************************************************************/
+/*                         Driver Debug Functions.                          */
+/****************************************************************************/
+
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ *        If ql2xextended_error_logging value is correctly set,
+ *        this message will appear in the messages file.
+ * vha:   Pointer to the scsi_qla_host_t.
+ * id:    This is a unique identifier for the level. It identifies the
+ *        part of the code from where the message originated.
+ * msg:   The message to be displayed.
+ */
+void
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+
+	if (!ql_mask_match(level))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	if (vha != NULL) {
+		const struct pci_dev *pdev = vha->hw->pdev;
+		/* <module-name> <pci-name> <msg-id>:<host> Message */
+		pr_warn("%s [%s]-%04x:%ld: %pV",
+			QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+			vha->host_no, &vaf);
+	} else {
+		pr_warn("%s [%s]-%04x: : %pV",
+			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
+	}
+
+	va_end(va);
+
+}
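+
+/*
+ * Usage sketch for ql_dbg() above (illustrative only; the message id
+ * and text here are hypothetical, not taken from the driver):
+ *
+ *	ql_dbg(ql_dbg_disc, vha, 0x2099,
+ *	    "Port login retry: state %d.\n", state);
+ *
+ * The message is emitted (via pr_warn) only when the ql_dbg_disc bit
+ * is set in the ql2xextended_error_logging mask; see ql_mask_match().
+ */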
+
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is not available and pci is available,
+ * i.e., before host allocation. It formats the message and logs it
+ * to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ *        If ql2xextended_error_logging value is correctly set,
+ *        this message will appear in the messages file.
+ * pdev:  Pointer to the struct pci_dev.
+ * id:    This is a unique id for the level. It identifies the part
+ *        of the code from where the message originated.
+ * msg:   The message to be displayed.
+ */
+void
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+	   const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+
+	if (pdev == NULL)
+		return;
+	if (!ql_mask_match(level))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	/* <module-name> <dev-name>:<msg-id> Message */
+	pr_warn("%s [%s]-%04x: : %pV",
+		QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
+
+	va_end(va);
+}
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file. All the messages will be logged
+ * irrespective of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ *        messages file.
+ * vha:   Pointer to the scsi_qla_host_t
+ * id:    This is a unique id for the level. It identifies the
+ *        part of the code from where the message originated.
+ * msg:   The message to be displayed.
+ */
+void
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+	char pbuf[128];
+
+	if (level > ql_errlev)
+		return;
+
+	if (vha != NULL) {
+		const struct pci_dev *pdev = vha->hw->pdev;
+		/* <module-name> <pci-name> <msg-id>:<host> Message */
+		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+			QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+	} else {
+		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+			QL_MSGHDR, "0000:00:00.0", id);
+	}
+	pbuf[sizeof(pbuf) - 1] = 0;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	switch (level) {
+	case ql_log_fatal: /* FATAL LOG */
+		pr_crit("%s%pV", pbuf, &vaf);
+		break;
+	case ql_log_warn:
+		pr_err("%s%pV", pbuf, &vaf);
+		break;
+	case ql_log_info:
+		pr_warn("%s%pV", pbuf, &vaf);
+		break;
+	default:
+		pr_info("%s%pV", pbuf, &vaf);
+		break;
+	}
+
+	va_end(va);
+}
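+
+/*
+ * Usage sketch for ql_log() above (illustrative; hypothetical id and
+ * text). Unlike ql_dbg(), the gate is ql_errlev rather than the debug
+ * mask, and the level selects the printk severity (fatal -> pr_crit,
+ * warn -> pr_err, info -> pr_warn, anything else -> pr_info):
+ *
+ *	ql_log(ql_log_warn, vha, 0x00af,
+ *	    "Unable to allocate memory for queue.\n");
+ */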
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is not available and pci is available,
+ * i.e., before host allocation. It formats the message and logs
+ * it to the messages file. All the messages are logged irrespective
+ * of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ *        messages file.
+ * pdev:  Pointer to the struct pci_dev.
+ * id:    This is a unique id for the level. It identifies the
+ *        part of the code from where the message originated.
+ * msg:   The message to be displayed.
+ */
+void
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+	   const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+	char pbuf[128];
+
+	if (pdev == NULL)
+		return;
+	if (level > ql_errlev)
+		return;
+
+	/* <module-name> <dev-name>:<msg-id> Message */
+	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+		 QL_MSGHDR, dev_name(&(pdev->dev)), id);
+	pbuf[sizeof(pbuf) - 1] = 0;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	switch (level) {
+	case ql_log_fatal: /* FATAL LOG */
+		pr_crit("%s%pV", pbuf, &vaf);
+		break;
+	case ql_log_warn:
+		pr_err("%s%pV", pbuf, &vaf);
+		break;
+	case ql_log_info:
+		pr_warn("%s%pV", pbuf, &vaf);
+		break;
+	default:
+		pr_info("%s%pV", pbuf, &vaf);
+		break;
+	}
+
+	va_end(va);
+}
+
+void
+ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+{
+	int i;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+	uint16_t __iomem *mbx_reg;
+
+	if (!ql_mask_match(level))
+		return;
+
+	if (IS_P3P_TYPE(ha))
+		mbx_reg = &reg82->mailbox_in[0];
+	else if (IS_FWI2_CAPABLE(ha))
+		mbx_reg = &reg24->mailbox0;
+	else
+		mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+	ql_dbg(level, vha, id, "Mailbox registers:\n");
+	for (i = 0; i < 6; i++, mbx_reg++)
+		ql_dbg(level, vha, id,
+		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
+}
+
+
+void
+ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
+	uint8_t *buf, uint size)
+{
+	uint cnt;
+
+	if (!ql_mask_match(level))
+		return;
+
+	ql_dbg(level, vha, id,
+	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
+	ql_dbg(level, vha, id,
+	    "----- -----------------------------------------------\n");
+	for (cnt = 0; cnt < size; cnt += 16) {
+		ql_dbg(level, vha, id, "%04x: ", cnt);
+		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+			       buf + cnt, min(16U, size - cnt), false);
+	}
+}
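+
+/*
+ * Usage sketch for ql_dump_buffer() above (illustrative; hypothetical
+ * message id and buffer):
+ *
+ *	ql_dump_buffer(ql_dbg_buffer + ql_dbg_verbose, vha, 0x3099,
+ *	    (uint8_t *)pkt, 24);
+ *
+ * Each iteration prints one 16-byte row via print_hex_dump(), so a
+ * 24-byte buffer yields one row of 16 bytes and one row of 8.
+ */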
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when a qpair is available. It formats the message
+ * and logs it to the messages file. All the messages will be logged
+ * irrespective of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ *        messages file.
+ * qpair: Pointer to the struct qla_qpair.
+ * id:    This is a unique id for the level. It identifies the
+ *        part of the code from where the message originated.
+ * msg:   The message to be displayed.
+ */
+void
+ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
+    const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+	char pbuf[128];
+
+	if (level > ql_errlev)
+		return;
+
+	if (qpair != NULL) {
+		const struct pci_dev *pdev = qpair->pdev;
+		/* <module-name> <pci-name> <msg-id> Message */
+		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
+			QL_MSGHDR, dev_name(&(pdev->dev)), id);
+	} else {
+		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+			QL_MSGHDR, "0000:00:00.0", id);
+	}
+	pbuf[sizeof(pbuf) - 1] = 0;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	switch (level) {
+	case ql_log_fatal: /* FATAL LOG */
+		pr_crit("%s%pV", pbuf, &vaf);
+		break;
+	case ql_log_warn:
+		pr_err("%s%pV", pbuf, &vaf);
+		break;
+	case ql_log_info:
+		pr_warn("%s%pV", pbuf, &vaf);
+		break;
+	default:
+		pr_info("%s%pV", pbuf, &vaf);
+		break;
+	}
+
+	va_end(va);
+}
+
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when a qpair is available. It formats the message
+ * and logs it to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ *        If ql2xextended_error_logging value is correctly set,
+ *        this message will appear in the messages file.
+ * qpair: Pointer to the struct qla_qpair.
+ * id:    This is a unique identifier for the level. It identifies the
+ *        part of the code from where the message originated.
+ * msg:   The message to be displayed.
+ */
+void
+ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
+    const char *fmt, ...)
+{
+	va_list va;
+	struct va_format vaf;
+
+	if (!ql_mask_match(level))
+		return;
+
+	va_start(va, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	if (qpair != NULL) {
+		const struct pci_dev *pdev = qpair->pdev;
+		/* <module-name> <pci-name> <msg-id> Message */
+		pr_warn("%s [%s]-%04x: %pV",
+		    QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+		    &vaf);
+	} else {
+		pr_warn("%s [%s]-%04x: : %pV",
+			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
+	}
+
+	va_end(va);
+
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dbg.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dbg.h
new file mode 100644
index 0000000..ceca6dd
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dbg.h
@@ -0,0 +1,382 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include "qla_def.h"
+
+/*
+ * Firmware Dump structure definition
+ */
+
+struct qla2300_fw_dump {
+	uint16_t hccr;
+	uint16_t pbiu_reg[8];
+	uint16_t risc_host_reg[8];
+	uint16_t mailbox_reg[32];
+	uint16_t resp_dma_reg[32];
+	uint16_t dma_reg[48];
+	uint16_t risc_hdw_reg[16];
+	uint16_t risc_gp0_reg[16];
+	uint16_t risc_gp1_reg[16];
+	uint16_t risc_gp2_reg[16];
+	uint16_t risc_gp3_reg[16];
+	uint16_t risc_gp4_reg[16];
+	uint16_t risc_gp5_reg[16];
+	uint16_t risc_gp6_reg[16];
+	uint16_t risc_gp7_reg[16];
+	uint16_t frame_buf_hdw_reg[64];
+	uint16_t fpm_b0_reg[64];
+	uint16_t fpm_b1_reg[64];
+	uint16_t risc_ram[0xf800];
+	uint16_t stack_ram[0x1000];
+	uint16_t data_ram[1];
+};
+
+struct qla2100_fw_dump {
+	uint16_t hccr;
+	uint16_t pbiu_reg[8];
+	uint16_t mailbox_reg[32];
+	uint16_t dma_reg[48];
+	uint16_t risc_hdw_reg[16];
+	uint16_t risc_gp0_reg[16];
+	uint16_t risc_gp1_reg[16];
+	uint16_t risc_gp2_reg[16];
+	uint16_t risc_gp3_reg[16];
+	uint16_t risc_gp4_reg[16];
+	uint16_t risc_gp5_reg[16];
+	uint16_t risc_gp6_reg[16];
+	uint16_t risc_gp7_reg[16];
+	uint16_t frame_buf_hdw_reg[16];
+	uint16_t fpm_b0_reg[64];
+	uint16_t fpm_b1_reg[64];
+	uint16_t risc_ram[0xf000];
+};
+
+struct qla24xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[7];
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[128];
+	uint32_t xseq_0_reg[16];
+	uint32_t xseq_1_reg[16];
+	uint32_t rseq_gp_reg[128];
+	uint32_t rseq_0_reg[16];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t cmd_dma_reg[16];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[112];
+	uint32_t fpm_hdw_reg[192];
+	uint32_t fb_hdw_reg[176];
+	uint32_t code_ram[0x2000];
+	uint32_t ext_mem[1];
+};
+
+struct qla25xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_risc_reg[32];
+	uint32_t pcie_regs[4];
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[11];
+	uint32_t risc_io_reg;
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[128];
+	uint32_t xseq_0_reg[48];
+	uint32_t xseq_1_reg[16];
+	uint32_t rseq_gp_reg[128];
+	uint32_t rseq_0_reg[32];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t aseq_gp_reg[128];
+	uint32_t aseq_0_reg[32];
+	uint32_t aseq_1_reg[16];
+	uint32_t aseq_2_reg[16];
+	uint32_t cmd_dma_reg[16];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[128];
+	uint32_t fpm_hdw_reg[192];
+	uint32_t fb_hdw_reg[192];
+	uint32_t code_ram[0x2000];
+	uint32_t ext_mem[1];
+};
+
+struct qla81xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_risc_reg[32];
+	uint32_t pcie_regs[4];
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[11];
+	uint32_t risc_io_reg;
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[128];
+	uint32_t xseq_0_reg[48];
+	uint32_t xseq_1_reg[16];
+	uint32_t rseq_gp_reg[128];
+	uint32_t rseq_0_reg[32];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t aseq_gp_reg[128];
+	uint32_t aseq_0_reg[32];
+	uint32_t aseq_1_reg[16];
+	uint32_t aseq_2_reg[16];
+	uint32_t cmd_dma_reg[16];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[128];
+	uint32_t fpm_hdw_reg[224];
+	uint32_t fb_hdw_reg[208];
+	uint32_t code_ram[0x2000];
+	uint32_t ext_mem[1];
+};
+
+struct qla83xx_fw_dump {
+	uint32_t host_status;
+	uint32_t host_risc_reg[48];
+	uint32_t pcie_regs[4];
+	uint32_t host_reg[32];
+	uint32_t shadow_reg[11];
+	uint32_t risc_io_reg;
+	uint16_t mailbox_reg[32];
+	uint32_t xseq_gp_reg[256];
+	uint32_t xseq_0_reg[48];
+	uint32_t xseq_1_reg[16];
+	uint32_t xseq_2_reg[16];
+	uint32_t rseq_gp_reg[256];
+	uint32_t rseq_0_reg[32];
+	uint32_t rseq_1_reg[16];
+	uint32_t rseq_2_reg[16];
+	uint32_t rseq_3_reg[16];
+	uint32_t aseq_gp_reg[256];
+	uint32_t aseq_0_reg[32];
+	uint32_t aseq_1_reg[16];
+	uint32_t aseq_2_reg[16];
+	uint32_t aseq_3_reg[16];
+	uint32_t cmd_dma_reg[64];
+	uint32_t req0_dma_reg[15];
+	uint32_t resp0_dma_reg[15];
+	uint32_t req1_dma_reg[15];
+	uint32_t xmt0_dma_reg[32];
+	uint32_t xmt1_dma_reg[32];
+	uint32_t xmt2_dma_reg[32];
+	uint32_t xmt3_dma_reg[32];
+	uint32_t xmt4_dma_reg[32];
+	uint32_t xmt_data_dma_reg[16];
+	uint32_t rcvt0_data_dma_reg[32];
+	uint32_t rcvt1_data_dma_reg[32];
+	uint32_t risc_gp_reg[128];
+	uint32_t lmc_reg[128];
+	uint32_t fpm_hdw_reg[256];
+	uint32_t rq0_array_reg[256];
+	uint32_t rq1_array_reg[256];
+	uint32_t rp0_array_reg[256];
+	uint32_t rp1_array_reg[256];
+	uint32_t queue_control_reg[16];
+	uint32_t fb_hdw_reg[432];
+	uint32_t at0_array_reg[128];
+	uint32_t code_ram[0x2400];
+	uint32_t ext_mem[1];
+};
+
+#define EFT_NUM_BUFFERS		4
+#define EFT_BYTES_PER_BUFFER	0x4000
+#define EFT_SIZE		((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
+
+#define FCE_NUM_BUFFERS		64
+#define FCE_BYTES_PER_BUFFER	0x400
+#define FCE_SIZE		((FCE_BYTES_PER_BUFFER) * (FCE_NUM_BUFFERS))
+#define fce_calc_size(b)	((FCE_BYTES_PER_BUFFER) * (b))
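+
+/*
+ * Worked sizes for the constants above: EFT_SIZE is
+ * 4 * 0x4000 = 0x10000 bytes (64 KiB), FCE_SIZE is
+ * 64 * 0x400 = 0x10000 bytes (64 KiB), and fce_calc_size(b) scales
+ * the 1 KiB per-buffer size by an actual buffer count b.
+ */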
+
+struct qla2xxx_fce_chain {
+	uint32_t type;
+	uint32_t chain_size;
+
+	uint32_t size;
+	uint32_t addr_l;
+	uint32_t addr_h;
+	uint32_t eregs[8];
+};
+
+/* used by exchange off load and extended login offload */
+struct qla2xxx_offld_chain {
+	uint32_t type;
+	uint32_t chain_size;
+
+	uint32_t size;
+	u64	 addr;
+};
+
+struct qla2xxx_mq_chain {
+	uint32_t type;
+	uint32_t chain_size;
+
+	uint32_t count;
+	uint32_t qregs[4 * QLA_MQ_SIZE];
+};
+
+struct qla2xxx_mqueue_header {
+	uint32_t queue;
+#define TYPE_REQUEST_QUEUE	0x1
+#define TYPE_RESPONSE_QUEUE	0x2
+#define TYPE_ATIO_QUEUE		0x3
+	uint32_t number;
+	uint32_t size;
+};
+
+struct qla2xxx_mqueue_chain {
+	uint32_t type;
+	uint32_t chain_size;
+};
+
+#define DUMP_CHAIN_VARIANT	0x80000000
+#define DUMP_CHAIN_FCE		0x7FFFFAF0
+#define DUMP_CHAIN_MQ		0x7FFFFAF1
+#define DUMP_CHAIN_QUEUE	0x7FFFFAF2
+#define DUMP_CHAIN_EXLOGIN	0x7FFFFAF3
+#define DUMP_CHAIN_EXCHG	0x7FFFFAF4
+#define DUMP_CHAIN_LAST		0x80000000
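+
+/*
+ * Chain layout, as suggested by the dump code in qla_dbg.c: each
+ * chain entry begins with a type word and a chain_size giving the
+ * offset of the next entry, and a consumer walks the entries until
+ * it reaches one whose type has the DUMP_CHAIN_LAST bit OR-ed in.
+ */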
+
+struct qla2xxx_fw_dump {
+	uint8_t signature[4];
+	uint32_t version;
+
+	uint32_t fw_major_version;
+	uint32_t fw_minor_version;
+	uint32_t fw_subminor_version;
+	uint32_t fw_attributes;
+
+	uint32_t vendor;
+	uint32_t device;
+	uint32_t subsystem_vendor;
+	uint32_t subsystem_device;
+
+	uint32_t fixed_size;
+	uint32_t mem_size;
+	uint32_t req_q_size;
+	uint32_t rsp_q_size;
+
+	uint32_t eft_size;
+	uint32_t eft_addr_l;
+	uint32_t eft_addr_h;
+
+	uint32_t header_size;
+
+	union {
+		struct qla2100_fw_dump isp21;
+		struct qla2300_fw_dump isp23;
+		struct qla24xx_fw_dump isp24;
+		struct qla25xx_fw_dump isp25;
+		struct qla81xx_fw_dump isp81;
+		struct qla83xx_fw_dump isp83;
+	} isp;
+};
+
+#define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK    0x1e400000
+
+#define ql_log_fatal		0 /* display fatal errors */
+#define ql_log_warn		1 /* display critical errors */
+#define ql_log_info		2 /* display all recovered errors */
+#define ql_log_all		3 /* This value is only used by ql_errlev.
+				   * No messages will use this value.
+				   * This should always be the highest
+				   * value of all the log levels.
+				   */
+
+extern int ql_errlev;
+
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
+
+
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+
+void __attribute__((format (printf, 4, 5)))
+ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
+
+/* Debug Levels */
+/* The 0x40000000 is the max value any debug level can have
+ * as ql2xextended_error_logging is of type signed int
+ */
+#define ql_dbg_init	0x40000000 /* Init Debug */
+#define ql_dbg_mbx	0x20000000 /* MBX Debug */
+#define ql_dbg_disc	0x10000000 /* Device Discovery Debug */
+#define ql_dbg_io	0x08000000 /* IO Tracing Debug */
+#define ql_dbg_dpc	0x04000000 /* DPC Thread Debug */
+#define ql_dbg_async	0x02000000 /* Async events Debug */
+#define ql_dbg_timer	0x01000000 /* Timer Debug */
+#define ql_dbg_user	0x00800000 /* User Space Interactions Debug */
+#define ql_dbg_taskm	0x00400000 /* Task Management Debug */
+#define ql_dbg_aer	0x00200000 /* AER/EEH Debug */
+#define ql_dbg_multiq	0x00100000 /* MultiQ Debug */
+#define ql_dbg_p3p	0x00080000 /* P3P specific Debug */
+#define ql_dbg_vport	0x00040000 /* Virtual Port Debug */
+#define ql_dbg_buffer	0x00020000 /* For dumping the buffer/regs */
+#define ql_dbg_misc	0x00010000 /* For dumping everything that is
+				    * not covered by the upper categories
+				    */
+#define ql_dbg_verbose	0x00008000 /* More verbosity for each level
+				    * This is to be used with other levels where
+				    * more verbosity is required. It might not
+				    * be applicable to all the levels.
+				    */
+#define ql_dbg_tgt	0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt	0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif  0x00000800 /* Target mode dif */
+
+extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+	uint32_t, void **);
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+	uint32_t, void **);
+extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
+	struct qla_hw_data *);
+extern int qla24xx_soft_reset(struct qla_hw_data *);
+
+static inline int
+ql_mask_match(uint level)
+{
+	return (level & ql2xextended_error_logging) == level;
+}
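+
+/*
+ * Example: with ql2xextended_error_logging set to
+ * QL_DBG_DEFAULT1_MASK (0x1e400000), ql_mask_match(ql_dbg_disc)
+ * is true (0x10000000 lies inside the mask) while
+ * ql_mask_match(ql_dbg_mbx) is false (0x20000000 does not).
+ */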
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_def.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_def.h
new file mode 100644
index 0000000..67b3055
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_def.h
@@ -0,0 +1,4494 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_DEF_H
+#define __QLA_DEF_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/aer.h>
+#include <linux/mutex.h>
+#include <linux/btree.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+
+#include "qla_bsg.h"
+#include "qla_nx.h"
+#include "qla_nx2.h"
+#include "qla_nvme.h"
+#define QLA2XXX_DRIVER_NAME	"qla2xxx"
+#define QLA2XXX_APIDEV		"ql2xapidev"
+#define QLA2XXX_MANUFACTURER	"QLogic Corporation"
+
+/*
+ * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
+ * but that's fine as we don't look at the last 24 ones for
+ * ISP2100 HBAs.
+ */
+#define MAILBOX_REGISTER_COUNT_2100	8
+#define MAILBOX_REGISTER_COUNT_2200	24
+#define MAILBOX_REGISTER_COUNT		32
+
+#define QLA2200A_RISC_ROM_VER	4
+#define FPM_2300		6
+#define FPM_2310		7
+
+#include "qla_settings.h"
+
+#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR)
+
+/*
+ * Data bit definitions
+ */
+#define BIT_0	0x1
+#define BIT_1	0x2
+#define BIT_2	0x4
+#define BIT_3	0x8
+#define BIT_4	0x10
+#define BIT_5	0x20
+#define BIT_6	0x40
+#define BIT_7	0x80
+#define BIT_8	0x100
+#define BIT_9	0x200
+#define BIT_10	0x400
+#define BIT_11	0x800
+#define BIT_12	0x1000
+#define BIT_13	0x2000
+#define BIT_14	0x4000
+#define BIT_15	0x8000
+#define BIT_16	0x10000
+#define BIT_17	0x20000
+#define BIT_18	0x40000
+#define BIT_19	0x80000
+#define BIT_20	0x100000
+#define BIT_21	0x200000
+#define BIT_22	0x400000
+#define BIT_23	0x800000
+#define BIT_24	0x1000000
+#define BIT_25	0x2000000
+#define BIT_26	0x4000000
+#define BIT_27	0x8000000
+#define BIT_28	0x10000000
+#define BIT_29	0x20000000
+#define BIT_30	0x40000000
+#define BIT_31	0x80000000
+
+#define LSB(x)	((uint8_t)(x))
+#define MSB(x)	((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x)	((uint16_t)(x))
+#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x)	((uint32_t)((uint64_t)(x)))
+#define MSD(x)	((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
+#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
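+
+/*
+ * Worked example for the helpers above: for x = 0x00020010,
+ * LSW(x) == 0x0010 and MSW(x) == 0x0002, so MAKE_HANDLE(MSW(x),
+ * LSW(x)) reassembles 0x00020010; likewise LSB(0x1234) == 0x34 and
+ * MSB(0x1234) == 0x12.
+ */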
+
+/*
+ * I/O register
+ */
+
+#define RD_REG_BYTE(addr)		readb(addr)
+#define RD_REG_WORD(addr)		readw(addr)
+#define RD_REG_DWORD(addr)		readl(addr)
+#define RD_REG_BYTE_RELAXED(addr)	readb_relaxed(addr)
+#define RD_REG_WORD_RELAXED(addr)	readw_relaxed(addr)
+#define RD_REG_DWORD_RELAXED(addr)	readl_relaxed(addr)
+#define WRT_REG_BYTE(addr, data)	writeb(data,addr)
+#define WRT_REG_WORD(addr, data)	writew(data,addr)
+#define WRT_REG_DWORD(addr, data)	writel(data,addr)
+
+/*
+ * ISP83XX specific remote register addresses
+ */
+#define QLA83XX_LED_PORT0			0x00201320
+#define QLA83XX_LED_PORT1			0x00201328
+#define QLA83XX_IDC_DEV_STATE		0x22102384
+#define QLA83XX_IDC_MAJOR_VERSION	0x22102380
+#define QLA83XX_IDC_MINOR_VERSION	0x22102398
+#define QLA83XX_IDC_DRV_PRESENCE	0x22102388
+#define QLA83XX_IDC_DRIVER_ACK		0x2210238c
+#define QLA83XX_IDC_CONTROL			0x22102390
+#define QLA83XX_IDC_AUDIT			0x22102394
+#define QLA83XX_IDC_LOCK_RECOVERY	0x2210239c
+#define QLA83XX_DRIVER_LOCKID		0x22102104
+#define QLA83XX_DRIVER_LOCK			0x8111c028
+#define QLA83XX_DRIVER_UNLOCK		0x8111c02c
+#define QLA83XX_FLASH_LOCKID		0x22102100
+#define QLA83XX_FLASH_LOCK			0x8111c010
+#define QLA83XX_FLASH_UNLOCK		0x8111c014
+#define QLA83XX_DEV_PARTINFO1		0x221023e0
+#define QLA83XX_DEV_PARTINFO2		0x221023e4
+#define QLA83XX_FW_HEARTBEAT		0x221020b0
+#define QLA83XX_PEG_HALT_STATUS1	0x221020a8
+#define QLA83XX_PEG_HALT_STATUS2	0x221020ac
+
+/* 83XX: Macros defining 8200 AEN Reason codes */
+#define IDC_DEVICE_STATE_CHANGE BIT_0
+#define IDC_PEG_HALT_STATUS_CHANGE BIT_1
+#define IDC_NIC_FW_REPORTED_FAILURE BIT_2
+#define IDC_HEARTBEAT_FAILURE BIT_3
+
+/* 83XX: Macros defining 8200 AEN Error-levels */
+#define ERR_LEVEL_NON_FATAL 0x1
+#define ERR_LEVEL_RECOVERABLE_FATAL 0x2
+#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4
+
+/* 83XX: Macros for IDC Version */
+#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01
+#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0
+
+/* 83XX: Macros for scheduling dpc tasks */
+#define QLA83XX_NIC_CORE_RESET 0x1
+#define QLA83XX_IDC_STATE_HANDLER 0x2
+#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3
+
+/* 83XX: Macros for defining IDC-Control bits */
+#define QLA83XX_IDC_RESET_DISABLED BIT_0
+#define QLA83XX_IDC_GRACEFUL_RESET BIT_1
+
+/* 83XX: Macros for different timeouts */
+#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30
+#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10
+#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ)
+
+/* 83XX: Macros for defining class in DEV-Partition Info register */
+#define QLA83XX_CLASS_TYPE_NONE		0x0
+#define QLA83XX_CLASS_TYPE_NIC		0x1
+#define QLA83XX_CLASS_TYPE_FCOE		0x2
+#define QLA83XX_CLASS_TYPE_ISCSI	0x3
+
+/* 83XX: Macros for IDC Lock-Recovery stages */
+#define IDC_LOCK_RECOVERY_STAGE1	0x1 /* Stage1: Intent for
+					     * lock-recovery
+					     */
+#define IDC_LOCK_RECOVERY_STAGE2	0x2 /* Stage2: Perform lock-recovery */
+
+/* 83XX: Macros for IDC Audit type */
+#define IDC_AUDIT_TIMESTAMP		0x0 /* IDC-AUDIT: Record timestamp of
+					     * dev-state change to NEED-RESET
+					     * or NEED-QUIESCENT
+					     */
+#define IDC_AUDIT_COMPLETION		0x1 /* IDC-AUDIT: Record duration of
+					     * reset-recovery completion in
+					     * seconds
+					     */
+/* ISP2031: Values for laser on/off */
+#define PORT_0_2031	0x00201340
+#define PORT_1_2031	0x00201350
+#define LASER_ON_2031	0x01800100
+#define LASER_OFF_2031	0x01800180
+
+/*
+ * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in a
+ * 133MHz slot.
+ */
+#define RD_REG_WORD_PIO(addr)		(inw((unsigned long)addr))
+#define WRT_REG_WORD_PIO(addr, data)	(outw(data,(unsigned long)addr))
+
+/*
+ * Fibre Channel device definitions.
+ */
+#define WWN_SIZE		8	/* Size of WWPN, WWN & WWNN */
+#define MAX_FIBRE_DEVICES_2100	512
+#define MAX_FIBRE_DEVICES_2400	2048
+#define MAX_FIBRE_DEVICES_LOOP	128
+#define MAX_FIBRE_DEVICES_MAX	MAX_FIBRE_DEVICES_2400
+#define LOOPID_MAP_SIZE		(ha->max_fibre_devices)
+#define MAX_FIBRE_LUNS  	0xFFFF
+#define	MAX_HOST_COUNT		16
+
+/*
+ * Host adapter default definitions.
+ */
+#define MAX_BUSES		1  /* We only have one bus today */
+#define MIN_LUNS		8
+#define MAX_LUNS		MAX_FIBRE_LUNS
+#define MAX_CMDS_PER_LUN	255
+
+/*
+ * Fibre Channel device definitions.
+ */
+#define SNS_LAST_LOOP_ID_2100	0xfe
+#define SNS_LAST_LOOP_ID_2300	0x7ff
+
+#define LAST_LOCAL_LOOP_ID	0x7d
+#define SNS_FL_PORT		0x7e
+#define FABRIC_CONTROLLER	0x7f
+#define SIMPLE_NAME_SERVER	0x80
+#define SNS_FIRST_LOOP_ID	0x81
+#define MANAGEMENT_SERVER	0xfe
+#define BROADCAST		0xff
+
+/*
+ * There is no correspondence between an N-PORT id and an AL_PA.  Therefore the
+ * valid range of an N-PORT id is 0 through 0x7ef.
+ */
+#define NPH_LAST_HANDLE		0x7ef
+#define NPH_MGMT_SERVER		0x7fa		/*  FFFFFA */
+#define NPH_SNS			0x7fc		/*  FFFFFC */
+#define NPH_FABRIC_CONTROLLER	0x7fd		/*  FFFFFD */
+#define NPH_F_PORT		0x7fe		/*  FFFFFE */
+#define NPH_IP_BROADCAST	0x7ff		/*  FFFFFF */
+
+#define NPH_SNS_LID(ha)	(IS_FWI2_CAPABLE(ha) ? NPH_SNS : SIMPLE_NAME_SERVER)
+
+#define MAX_CMDSZ	16		/* SCSI maximum CDB size. */
+#include "qla_fw.h"
+
+struct name_list_extended {
+	struct get_name_list_extended *l;
+	dma_addr_t		ldma;
+	struct list_head 	fcports;	/* protected by sess_list */
+	u32			size;
+	u8			sent;
+};
+/*
+ * Timeout timer counts in seconds
+ */
+#define PORT_RETRY_TIME			1
+#define LOOP_DOWN_TIMEOUT		60
+#define LOOP_DOWN_TIME			255	/* 240 */
+#define	LOOP_DOWN_RESET			(LOOP_DOWN_TIME - 30)
+
+#define DEFAULT_OUTSTANDING_COMMANDS	4096
+#define MIN_OUTSTANDING_COMMANDS	128
+
+/* ISP request and response entry counts (37-65535) */
+#define REQUEST_ENTRY_CNT_2100		128	/* Number of request entries. */
+#define REQUEST_ENTRY_CNT_2200		2048	/* Number of request entries. */
+#define REQUEST_ENTRY_CNT_24XX		2048	/* Number of request entries. */
+#define REQUEST_ENTRY_CNT_83XX		8192	/* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_83XX		4096	/* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_2100		64	/* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_2300		512	/* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_MQ		128	/* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX		4096	/* Number of ATIO entries. */
+#define RESPONSE_ENTRY_CNT_FX00		256     /* Number of response entries.*/
+#define FW_DEF_EXCHANGES_CNT 2048
+
+struct req_que;
+struct qla_tgt_sess;
+
+/*
+ * SCSI Request Block
+ */
+struct srb_cmd {
+	struct scsi_cmnd *cmd;		/* Linux SCSI command pkt */
+	uint32_t request_sense_length;
+	uint32_t fw_sense_length;
+	uint8_t *request_sense_ptr;
+	void *ctx;
+};
+
+/*
+ * SRB flag definitions
+ */
+#define SRB_DMA_VALID			BIT_0	/* Command sent to ISP */
+#define SRB_FCP_CMND_DMA_VALID		BIT_12	/* DIF: DSD List valid */
+#define SRB_CRC_CTX_DMA_VALID		BIT_2	/* DIF: context DMA valid */
+#define SRB_CRC_PROT_DMA_VALID		BIT_4	/* DIF: prot DMA valid */
+#define SRB_CRC_CTX_DSD_VALID		BIT_5	/* DIF: dsd_list valid */
+
+/* To identify if an srb is of T10-CRC type. @sp => srb_t pointer */
+#define IS_PROT_IO(sp)	(sp->flags & SRB_CRC_CTX_DSD_VALID)
+
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+	uint32_t b24 : 24;
+
+	struct {
+#ifdef __BIG_ENDIAN
+		uint8_t domain;
+		uint8_t area;
+		uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+		uint8_t al_pa;
+		uint8_t area;
+		uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+		uint8_t rsvd_1;
+	} b;
+} port_id_t;
+#define INVALID_PORT_ID	0xFFFFFF
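+
+/*
+ * The #ifdef ordering above is meant to keep the named bytes and the
+ * packed value consistent on either byte order: a port_id_t with
+ * b24 == 0x010203 should read back b.domain == 0x01, b.area == 0x02
+ * and b.al_pa == 0x03 on both big- and little-endian builds.
+ */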
+
+struct els_logo_payload {
+	uint8_t opcode;
+	uint8_t rsvd[3];
+	uint8_t s_id[3];
+	uint8_t rsvd1[1];
+	uint8_t wwpn[WWN_SIZE];
+};
+
+struct ct_arg {
+	void		*iocb;
+	u16		nport_handle;
+	dma_addr_t	req_dma;
+	dma_addr_t	rsp_dma;
+	u32		req_size;
+	u32		rsp_size;
+	void		*req;
+	void		*rsp;
+	port_id_t	id;
+};
+
+/*
+ * SRB extensions.
+ */
+struct srb_iocb {
+	union {
+		struct {
+			uint16_t flags;
+#define SRB_LOGIN_RETRIED	BIT_0
+#define SRB_LOGIN_COND_PLOGI	BIT_1
+#define SRB_LOGIN_SKIP_PRLI	BIT_2
+#define SRB_LOGIN_NVME_PRLI	BIT_3
+			uint16_t data[2];
+			u32 iop[2];
+		} logio;
+		struct {
+#define ELS_DCMD_TIMEOUT 20
+#define ELS_DCMD_LOGO 0x5
+			uint32_t flags;
+			uint32_t els_cmd;
+			struct completion comp;
+			struct els_logo_payload *els_logo_pyld;
+			dma_addr_t els_logo_pyld_dma;
+		} els_logo;
+		struct {
+			/*
+			 * Values for the flags field below are as
+			 * defined for the control_flags field of the
+			 * tsk_mgmt_entry struct in qla_fw.h.
+			 */
+			uint64_t lun;
+			uint32_t flags;
+			uint32_t data;
+			struct completion comp;
+			__le16 comp_status;
+		} tmf;
+		struct {
+#define SRB_FXDISC_REQ_DMA_VALID	BIT_0
+#define SRB_FXDISC_RESP_DMA_VALID	BIT_1
+#define SRB_FXDISC_REQ_DWRD_VALID	BIT_2
+#define SRB_FXDISC_RSP_DWRD_VALID	BIT_3
+#define FXDISC_TIMEOUT 20
+			uint8_t flags;
+			uint32_t req_len;
+			uint32_t rsp_len;
+			void *req_addr;
+			void *rsp_addr;
+			dma_addr_t req_dma_handle;
+			dma_addr_t rsp_dma_handle;
+			__le32 adapter_id;
+			__le32 adapter_id_hi;
+			__le16 req_func_type;
+			__le32 req_data;
+			__le32 req_data_extra;
+			__le32 result;
+			__le32 seq_number;
+			__le16 fw_flags;
+			struct completion fxiocb_comp;
+			__le32 reserved_0;
+			uint8_t reserved_1;
+		} fxiocb;
+		struct {
+			uint32_t cmd_hndl;
+			__le16 comp_status;
+			struct completion comp;
+		} abt;
+		struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
+		struct {
+			__le16 in_mb[MAX_IOCB_MB_REG];	/* from FW */
+			__le16 out_mb[MAX_IOCB_MB_REG];	/* to FW */
+			void *out, *in;
+			dma_addr_t out_dma, in_dma;
+			struct completion comp;
+			int rc;
+		} mbx;
+		struct {
+			struct imm_ntfy_from_isp *ntfy;
+		} nack;
+		struct {
+			__le16 comp_status;
+			uint16_t rsp_pyld_len;
+			uint8_t	aen_op;
+			void *desc;
+
+			/* These are only used with ls4 requests */
+			int cmd_len;
+			int rsp_len;
+			dma_addr_t cmd_dma;
+			dma_addr_t rsp_dma;
+			enum nvmefc_fcp_datadir dir;
+			uint32_t dl;
+			uint32_t timeout_sec;
+			struct	list_head   entry;
+		} nvme;
+	} u;
+
+	struct timer_list timer;
+	void (*timeout)(void *);
+};
+
+/* Values for srb_ctx type */
+#define SRB_LOGIN_CMD	1
+#define SRB_LOGOUT_CMD	2
+#define SRB_ELS_CMD_RPT 3
+#define SRB_ELS_CMD_HST 4
+#define SRB_CT_CMD	5
+#define SRB_ADISC_CMD	6
+#define SRB_TM_CMD	7
+#define SRB_SCSI_CMD	8
+#define SRB_BIDI_CMD	9
+#define SRB_FXIOCB_DCMD	10
+#define SRB_FXIOCB_BCMD	11
+#define SRB_ABT_CMD	12
+#define SRB_ELS_DCMD	13
+#define SRB_MB_IOCB	14
+#define SRB_CT_PTHRU_CMD 15
+#define SRB_NACK_PLOGI	16
+#define SRB_NACK_PRLI	17
+#define SRB_NACK_LOGO	18
+#define SRB_NVME_CMD	19
+#define SRB_NVME_LS	20
+#define SRB_PRLI_CMD	21
+
+enum {
+	TYPE_SRB,
+	TYPE_TGT_CMD,
+};
+
+typedef struct srb {
+	/*
+	 * Do not move cmd_type field, it needs to
+	 * line up with qla_tgt_cmd->cmd_type
+	 */
+	uint8_t cmd_type;
+	uint8_t pad[3];
+	atomic_t ref_count;
+	wait_queue_head_t nvme_ls_waitq;
+	struct fc_port *fcport;
+	struct scsi_qla_host *vha;
+	uint32_t handle;
+	uint16_t flags;
+	uint16_t type;
+	const char *name;
+	int iocbs;
+	struct qla_qpair *qpair;
+	struct list_head elem;
+	u32 gen1;	/* scratch */
+	u32 gen2;	/* scratch */
+	union {
+		struct srb_iocb iocb_cmd;
+		struct bsg_job *bsg_job;
+		struct srb_cmd scmd;
+	} u;
+	void (*done)(void *, int);
+	void (*free)(void *);
+} srb_t;
+
+#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
+#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
+#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
+
+#define GET_CMD_SENSE_LEN(sp) \
+	(sp->u.scmd.request_sense_length)
+#define SET_CMD_SENSE_LEN(sp, len) \
+	(sp->u.scmd.request_sense_length = len)
+#define GET_CMD_SENSE_PTR(sp) \
+	(sp->u.scmd.request_sense_ptr)
+#define SET_CMD_SENSE_PTR(sp, ptr) \
+	(sp->u.scmd.request_sense_ptr = ptr)
+#define GET_FW_SENSE_LEN(sp) \
+	(sp->u.scmd.fw_sense_length)
+#define SET_FW_SENSE_LEN(sp, len) \
+	(sp->u.scmd.fw_sense_length = len)
+
+struct msg_echo_lb {
+	dma_addr_t send_dma;
+	dma_addr_t rcv_dma;
+	uint16_t req_sg_cnt;
+	uint16_t rsp_sg_cnt;
+	uint16_t options;
+	uint32_t transfer_size;
+	uint32_t iteration_count;
+};
+
+/*
+ * ISP I/O Register Set structure definitions.
+ */
+struct device_reg_2xxx {
+	uint16_t flash_address; 	/* Flash BIOS address */
+	uint16_t flash_data;		/* Flash BIOS data */
+	uint16_t unused_1[1];		/* Gap */
+	uint16_t ctrl_status;		/* Control/Status */
+#define CSR_FLASH_64K_BANK	BIT_3	/* Flash upper 64K bank select */
+#define CSR_FLASH_ENABLE	BIT_1	/* Flash BIOS Read/Write enable */
+#define CSR_ISP_SOFT_RESET	BIT_0	/* ISP soft reset */
+
+	uint16_t ictrl;			/* Interrupt control */
+#define ICR_EN_INT		BIT_15	/* ISP enable interrupts. */
+#define ICR_EN_RISC		BIT_3	/* ISP enable RISC interrupts. */
+
+	uint16_t istatus;		/* Interrupt status */
+#define ISR_RISC_INT		BIT_3	/* RISC interrupt */
+
+	uint16_t semaphore;		/* Semaphore */
+	uint16_t nvram;			/* NVRAM register. */
+#define NVR_DESELECT		0
+#define NVR_BUSY		BIT_15
+#define NVR_WRT_ENABLE		BIT_14	/* Write enable */
+#define NVR_PR_ENABLE		BIT_13	/* Protection register enable */
+#define NVR_DATA_IN		BIT_3
+#define NVR_DATA_OUT		BIT_2
+#define NVR_SELECT		BIT_1
+#define NVR_CLOCK		BIT_0
+
+#define NVR_WAIT_CNT		20000
+
+	union {
+		struct {
+			uint16_t mailbox0;
+			uint16_t mailbox1;
+			uint16_t mailbox2;
+			uint16_t mailbox3;
+			uint16_t mailbox4;
+			uint16_t mailbox5;
+			uint16_t mailbox6;
+			uint16_t mailbox7;
+			uint16_t unused_2[59];	/* Gap */
+		} __attribute__((packed)) isp2100;
+		struct {
+						/* Request Queue */
+			uint16_t req_q_in;	/*  In-Pointer */
+			uint16_t req_q_out;	/*  Out-Pointer */
+						/* Response Queue */
+			uint16_t rsp_q_in;	/*  In-Pointer */
+			uint16_t rsp_q_out;	/*  Out-Pointer */
+
+						/* RISC to Host Status */
+			uint32_t host_status;
+#define HSR_RISC_INT		BIT_15	/* RISC interrupt */
+#define HSR_RISC_PAUSED		BIT_8	/* RISC Paused */
+
+					/* Host to Host Semaphore */
+			uint16_t host_semaphore;
+			uint16_t unused_3[17];	/* Gap */
+			uint16_t mailbox0;
+			uint16_t mailbox1;
+			uint16_t mailbox2;
+			uint16_t mailbox3;
+			uint16_t mailbox4;
+			uint16_t mailbox5;
+			uint16_t mailbox6;
+			uint16_t mailbox7;
+			uint16_t mailbox8;
+			uint16_t mailbox9;
+			uint16_t mailbox10;
+			uint16_t mailbox11;
+			uint16_t mailbox12;
+			uint16_t mailbox13;
+			uint16_t mailbox14;
+			uint16_t mailbox15;
+			uint16_t mailbox16;
+			uint16_t mailbox17;
+			uint16_t mailbox18;
+			uint16_t mailbox19;
+			uint16_t mailbox20;
+			uint16_t mailbox21;
+			uint16_t mailbox22;
+			uint16_t mailbox23;
+			uint16_t mailbox24;
+			uint16_t mailbox25;
+			uint16_t mailbox26;
+			uint16_t mailbox27;
+			uint16_t mailbox28;
+			uint16_t mailbox29;
+			uint16_t mailbox30;
+			uint16_t mailbox31;
+			uint16_t fb_cmd;
+			uint16_t unused_4[10];	/* Gap */
+		} __attribute__((packed)) isp2300;
+	} u;
+
+	uint16_t fpm_diag_config;
+	uint16_t unused_5[0x4];		/* Gap */
+	uint16_t risc_hw;
+	uint16_t unused_5_1;		/* Gap */
+	uint16_t pcr;			/* Processor Control Register. */
+	uint16_t unused_6[0x5];		/* Gap */
+	uint16_t mctr;			/* Memory Configuration and Timing. */
+	uint16_t unused_7[0x3];		/* Gap */
+	uint16_t fb_cmd_2100;		/* Unused on 23XX */
+	uint16_t unused_8[0x3];		/* Gap */
+	uint16_t hccr;			/* Host command & control register. */
+#define HCCR_HOST_INT		BIT_7	/* Host interrupt bit */
+#define HCCR_RISC_PAUSE		BIT_5	/* Pause mode bit */
+					/* HCCR commands */
+#define HCCR_RESET_RISC		0x1000	/* Reset RISC */
+#define HCCR_PAUSE_RISC		0x2000	/* Pause RISC */
+#define HCCR_RELEASE_RISC	0x3000	/* Release RISC from reset. */
+#define HCCR_SET_HOST_INT	0x5000	/* Set host interrupt */
+#define HCCR_CLR_HOST_INT	0x6000	/* Clear HOST interrupt */
+#define HCCR_CLR_RISC_INT	0x7000	/* Clear RISC interrupt */
+#define	HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */
+#define HCCR_ENABLE_PARITY	0xA000	/* Enable PARITY interrupt */
+
+	uint16_t unused_9[5];		/* Gap */
+	uint16_t gpiod;			/* GPIO Data register. */
+	uint16_t gpioe;			/* GPIO Enable register. */
+#define GPIO_LED_MASK			0x00C0
+#define GPIO_LED_GREEN_OFF_AMBER_OFF	0x0000
+#define GPIO_LED_GREEN_ON_AMBER_OFF	0x0040
+#define GPIO_LED_GREEN_OFF_AMBER_ON	0x0080
+#define GPIO_LED_GREEN_ON_AMBER_ON	0x00C0
+#define GPIO_LED_ALL_OFF		0x0000
+#define GPIO_LED_RED_ON_OTHER_OFF	0x0001	/* isp2322 */
+#define GPIO_LED_RGA_ON			0x00C1	/* isp2322: red green amber */
+
+	union {
+		struct {
+			uint16_t unused_10[8];	/* Gap */
+			uint16_t mailbox8;
+			uint16_t mailbox9;
+			uint16_t mailbox10;
+			uint16_t mailbox11;
+			uint16_t mailbox12;
+			uint16_t mailbox13;
+			uint16_t mailbox14;
+			uint16_t mailbox15;
+			uint16_t mailbox16;
+			uint16_t mailbox17;
+			uint16_t mailbox18;
+			uint16_t mailbox19;
+			uint16_t mailbox20;
+			uint16_t mailbox21;
+			uint16_t mailbox22;
+			uint16_t mailbox23;	/* Also probe reg. */
+		} __attribute__((packed)) isp2200;
+	} u_end;
+};
+
+struct device_reg_25xxmq {
+	uint32_t req_q_in;
+	uint32_t req_q_out;
+	uint32_t rsp_q_in;
+	uint32_t rsp_q_out;
+	uint32_t atio_q_in;
+	uint32_t atio_q_out;
+};
+
+
+struct device_reg_fx00 {
+	uint32_t mailbox0;		/* 00 */
+	uint32_t mailbox1;		/* 04 */
+	uint32_t mailbox2;		/* 08 */
+	uint32_t mailbox3;		/* 0C */
+	uint32_t mailbox4;		/* 10 */
+	uint32_t mailbox5;		/* 14 */
+	uint32_t mailbox6;		/* 18 */
+	uint32_t mailbox7;		/* 1C */
+	uint32_t mailbox8;		/* 20 */
+	uint32_t mailbox9;		/* 24 */
+	uint32_t mailbox10;		/* 28 */
+	uint32_t mailbox11;
+	uint32_t mailbox12;
+	uint32_t mailbox13;
+	uint32_t mailbox14;
+	uint32_t mailbox15;
+	uint32_t mailbox16;
+	uint32_t mailbox17;
+	uint32_t mailbox18;
+	uint32_t mailbox19;
+	uint32_t mailbox20;
+	uint32_t mailbox21;
+	uint32_t mailbox22;
+	uint32_t mailbox23;
+	uint32_t mailbox24;
+	uint32_t mailbox25;
+	uint32_t mailbox26;
+	uint32_t mailbox27;
+	uint32_t mailbox28;
+	uint32_t mailbox29;
+	uint32_t mailbox30;
+	uint32_t mailbox31;
+	uint32_t aenmailbox0;
+	uint32_t aenmailbox1;
+	uint32_t aenmailbox2;
+	uint32_t aenmailbox3;
+	uint32_t aenmailbox4;
+	uint32_t aenmailbox5;
+	uint32_t aenmailbox6;
+	uint32_t aenmailbox7;
+	/* Request Queue. */
+	uint32_t req_q_in;		/* A0 - Request Queue In-Pointer */
+	uint32_t req_q_out;		/* A4 - Request Queue Out-Pointer */
+	/* Response Queue. */
+	uint32_t rsp_q_in;		/* A8 - Response Queue In-Pointer */
+	uint32_t rsp_q_out;		/* AC - Response Queue Out-Pointer */
+	/* Init values shadowed on FW Up Event */
+	uint32_t initval0;		/* B0 */
+	uint32_t initval1;		/* B4 */
+	uint32_t initval2;		/* B8 */
+	uint32_t initval3;		/* BC */
+	uint32_t initval4;		/* C0 */
+	uint32_t initval5;		/* C4 */
+	uint32_t initval6;		/* C8 */
+	uint32_t initval7;		/* CC */
+	uint32_t fwheartbeat;		/* D0 */
+	uint32_t pseudoaen;		/* D4 */
+};
+
+
+
+typedef union {
+		struct device_reg_2xxx isp;
+		struct device_reg_24xx isp24;
+		struct device_reg_25xxmq isp25mq;
+		struct device_reg_82xx isp82;
+		struct device_reg_fx00 ispfx00;
+} __iomem device_reg_t;
+
+#define ISP_REQ_Q_IN(ha, reg) \
+	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+	 &(reg)->u.isp2100.mailbox4 : \
+	 &(reg)->u.isp2300.req_q_in)
+#define ISP_REQ_Q_OUT(ha, reg) \
+	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+	 &(reg)->u.isp2100.mailbox4 : \
+	 &(reg)->u.isp2300.req_q_out)
+#define ISP_RSP_Q_IN(ha, reg) \
+	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+	 &(reg)->u.isp2100.mailbox5 : \
+	 &(reg)->u.isp2300.rsp_q_in)
+#define ISP_RSP_Q_OUT(ha, reg) \
+	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+	 &(reg)->u.isp2100.mailbox5 : \
+	 &(reg)->u.isp2300.rsp_q_out)
+
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
+#define MAILBOX_REG(ha, reg, num) \
+	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+	 (num < 8 ? \
+	  &(reg)->u.isp2100.mailbox0 + (num) : \
+	  &(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \
+	 &(reg)->u.isp2300.mailbox0 + (num))
+#define RD_MAILBOX_REG(ha, reg, num) \
+	RD_REG_WORD(MAILBOX_REG(ha, reg, num))
+#define WRT_MAILBOX_REG(ha, reg, num, data) \
+	WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data)
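+
+/*
+ * Example of MAILBOX_REG() resolution: on an ISP2200, register 4 maps
+ * to &reg->u.isp2100.mailbox0 + 4 (mailbox4), while register 10 falls
+ * past the first bank and maps to &reg->u_end.isp2200.mailbox8 + 2
+ * (mailbox10); on 23xx-style chips all 32 registers are contiguous
+ * after u.isp2300.mailbox0.
+ */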
+
+#define FB_CMD_REG(ha, reg) \
+	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
+	 &(reg)->fb_cmd_2100 : \
+	 &(reg)->u.isp2300.fb_cmd)
+#define RD_FB_CMD_REG(ha, reg) \
+	RD_REG_WORD(FB_CMD_REG(ha, reg))
+#define WRT_FB_CMD_REG(ha, reg, data) \
+	WRT_REG_WORD(FB_CMD_REG(ha, reg), data)
+
+typedef struct {
+	uint32_t	out_mb;		/* outbound from driver */
+	uint32_t	in_mb;			/* Incoming from RISC */
+	uint16_t	mb[MAILBOX_REGISTER_COUNT];
+	long		buf_size;
+	void		*bufp;
+	uint32_t	tov;
+	uint8_t		flags;
+#define MBX_DMA_IN	BIT_0
+#define	MBX_DMA_OUT	BIT_1
+#define IOCTL_CMD	BIT_2
+} mbx_cmd_t;
+
+struct mbx_cmd_32 {
+	uint32_t	out_mb;		/* outbound from driver */
+	uint32_t	in_mb;			/* Incoming from RISC */
+	uint32_t	mb[MAILBOX_REGISTER_COUNT];
+	long		buf_size;
+	void		*bufp;
+	uint32_t	tov;
+	uint8_t		flags;
+#define MBX_DMA_IN	BIT_0
+#define	MBX_DMA_OUT	BIT_1
+#define IOCTL_CMD	BIT_2
+};
+
+
+#define	MBX_TOV_SECONDS	30
+
+/*
+ *  ISP product identification definitions in mailboxes after reset.
+ */
+#define PROD_ID_1		0x4953
+#define PROD_ID_2		0x0000
+#define PROD_ID_2a		0x5020
+#define PROD_ID_3		0x2020
+
+/*
+ * ISP mailbox Self-Test status codes
+ */
+#define MBS_FRM_ALIVE		0	/* Firmware Alive. */
+#define MBS_CHKSUM_ERR		1	/* Checksum Error. */
+#define MBS_BUSY		4	/* Busy. */
+
+/*
+ * ISP mailbox command complete status codes
+ */
+#define MBS_COMMAND_COMPLETE		0x4000
+#define MBS_INVALID_COMMAND		0x4001
+#define MBS_HOST_INTERFACE_ERROR	0x4002
+#define MBS_TEST_FAILED			0x4003
+#define MBS_COMMAND_ERROR		0x4005
+#define MBS_COMMAND_PARAMETER_ERROR	0x4006
+#define MBS_PORT_ID_USED		0x4007
+#define MBS_LOOP_ID_USED		0x4008
+#define MBS_ALL_IDS_IN_USE		0x4009
+#define MBS_NOT_LOGGED_IN		0x400A
+#define MBS_LINK_DOWN_ERROR		0x400B
+#define MBS_DIAG_ECHO_TEST_ERROR	0x400C
+
+/*
+ * ISP mailbox asynchronous event status codes
+ */
+#define MBA_ASYNC_EVENT		0x8000	/* Asynchronous event. */
+#define MBA_RESET		0x8001	/* Reset Detected. */
+#define MBA_SYSTEM_ERR		0x8002	/* System Error. */
+#define MBA_REQ_TRANSFER_ERR	0x8003	/* Request Transfer Error. */
+#define MBA_RSP_TRANSFER_ERR	0x8004	/* Response Transfer Error. */
+#define MBA_WAKEUP_THRES	0x8005	/* Request Queue Wake-up. */
+#define MBA_LIP_OCCURRED	0x8010	/* Loop Initialization Procedure */
+					/* occurred. */
+#define MBA_LOOP_UP		0x8011	/* FC Loop UP. */
+#define MBA_LOOP_DOWN		0x8012	/* FC Loop Down. */
+#define MBA_LIP_RESET		0x8013	/* LIP reset occurred. */
+#define MBA_PORT_UPDATE		0x8014	/* Port Database update. */
+#define MBA_RSCN_UPDATE		0x8015	/* Register State Chg Notification. */
+#define MBA_LIP_F8		0x8016	/* Received a LIP F8. */
+#define MBA_LOOP_INIT_ERR	0x8017	/* Loop Initialization Error. */
+#define MBA_FABRIC_AUTH_REQ	0x801b	/* Fabric Authentication Required. */
+#define MBA_SCSI_COMPLETION	0x8020	/* SCSI Command Complete. */
+#define MBA_CTIO_COMPLETION	0x8021	/* CTIO Complete. */
+#define MBA_IP_COMPLETION	0x8022	/* IP Transmit Command Complete. */
+#define MBA_IP_RECEIVE		0x8023	/* IP Received. */
+#define MBA_IP_BROADCAST	0x8024	/* IP Broadcast Received. */
+#define MBA_IP_LOW_WATER_MARK	0x8025	/* IP Low Water Mark reached. */
+#define MBA_IP_RCV_BUFFER_EMPTY 0x8026	/* IP receive buffer queue empty. */
+#define MBA_IP_HDR_DATA_SPLIT	0x8027	/* IP header/data splitting feature */
+					/* used. */
+#define MBA_TRACE_NOTIFICATION	0x8028	/* Trace/Diagnostic notification. */
+#define MBA_POINT_TO_POINT	0x8030	/* Point to point mode. */
+#define MBA_CMPLT_1_16BIT	0x8031	/* Completion 1 16bit IOSB. */
+#define MBA_CMPLT_2_16BIT	0x8032	/* Completion 2 16bit IOSB. */
+#define MBA_CMPLT_3_16BIT	0x8033	/* Completion 3 16bit IOSB. */
+#define MBA_CMPLT_4_16BIT	0x8034	/* Completion 4 16bit IOSB. */
+#define MBA_CMPLT_5_16BIT	0x8035	/* Completion 5 16bit IOSB. */
+#define MBA_CHG_IN_CONNECTION	0x8036	/* Change in connection mode. */
+#define MBA_RIO_RESPONSE	0x8040	/* RIO response queue update. */
+#define MBA_ZIO_RESPONSE	0x8040	/* ZIO response queue update. */
+#define MBA_CMPLT_2_32BIT	0x8042	/* Completion 2 32bit IOSB. */
+#define MBA_BYPASS_NOTIFICATION	0x8043	/* Auto bypass notification. */
+#define MBA_DISCARD_RND_FRAME	0x8048	/* discard RND frame due to error. */
+#define MBA_REJECTED_FCP_CMD	0x8049	/* rejected FCP_CMD. */
+#define MBA_FW_NOT_STARTED	0x8050	/* Firmware not started */
+#define MBA_FW_STARTING		0x8051	/* Firmware starting */
+#define MBA_FW_RESTART_CMPLT	0x8060	/* Firmware restart complete */
+#define MBA_INIT_REQUIRED	0x8061	/* Initialization required */
+#define MBA_SHUTDOWN_REQUESTED	0x8062	/* Shutdown Requested */
+#define MBA_TEMPERATURE_ALERT	0x8070	/* Temperature Alert */
+#define MBA_DPORT_DIAGNOSTICS	0x8080	/* D-port Diagnostics */
+#define MBA_TRANS_INSERT	0x8130	/* Transceiver Insertion */
+#define MBA_FW_INIT_FAILURE	0x8401	/* Firmware initialization failure */
+#define MBA_MIRROR_LUN_CHANGE	0x8402	/* Mirror LUN State Change
+					   Notification */
+#define MBA_FW_POLL_STATE	0x8600  /* Firmware in poll diagnostic state */
+#define MBA_FW_RESET_FCT	0x8502	/* Firmware reset factory defaults */
+#define MBA_FW_INIT_INPROGRESS	0x8500	/* Firmware boot in progress */
+/* 83XX FCoE specific */
+#define MBA_IDC_AEN		0x8200  /* FCoE: NIC Core state change AEN */
+
+/* Interrupt type codes */
+#define INTR_ROM_MB_SUCCESS		0x1
+#define INTR_ROM_MB_FAILED		0x2
+#define INTR_MB_SUCCESS			0x10
+#define INTR_MB_FAILED			0x11
+#define INTR_ASYNC_EVENT		0x12
+#define INTR_RSP_QUE_UPDATE		0x13
+#define INTR_RSP_QUE_UPDATE_83XX	0x14
+#define INTR_ATIO_QUE_UPDATE		0x1C
+#define INTR_ATIO_RSP_QUE_UPDATE	0x1D
+
+/* ISP mailbox loopback echo diagnostic error code */
+#define MBS_LB_RESET	0x17
+/*
+ * Firmware options 1, 2, 3.
+ */
+#define FO1_AE_ON_LIPF8			BIT_0
+#define FO1_AE_ALL_LIP_RESET		BIT_1
+#define FO1_CTIO_RETRY			BIT_3
+#define FO1_DISABLE_LIP_F7_SW		BIT_4
+#define FO1_DISABLE_100MS_LOS_WAIT	BIT_5
+#define FO1_DISABLE_GPIO6_7		BIT_6	/* LED bits */
+#define FO1_AE_ON_LOOP_INIT_ERR		BIT_7
+#define FO1_SET_EMPHASIS_SWING		BIT_8
+#define FO1_AE_AUTO_BYPASS		BIT_9
+#define FO1_ENABLE_PURE_IOCB		BIT_10
+#define FO1_AE_PLOGI_RJT		BIT_11
+#define FO1_ENABLE_ABORT_SEQUENCE	BIT_12
+#define FO1_AE_QUEUE_FULL		BIT_13
+
+#define FO2_ENABLE_ATIO_TYPE_3		BIT_0
+#define FO2_REV_LOOPBACK		BIT_1
+
+#define FO3_ENABLE_EMERG_IOCB		BIT_0
+#define FO3_AE_RND_ERROR		BIT_1
+
+/* 24XX additional firmware options */
+#define ADD_FO_COUNT			3
+#define ADD_FO1_DISABLE_GPIO_LED_CTRL	BIT_6	/* LED bits */
+#define ADD_FO1_ENABLE_PUREX_IOCB	BIT_10
+
+#define ADD_FO2_ENABLE_SEL_CLS2		BIT_5
+
+#define ADD_FO3_NO_ABT_ON_LINK_DOWN	BIT_14
+
+/*
+ * ISP mailbox commands
+ */
+#define MBC_LOAD_RAM			1	/* Load RAM. */
+#define MBC_EXECUTE_FIRMWARE		2	/* Execute firmware. */
+#define MBC_READ_RAM_WORD		5	/* Read RAM word. */
+#define MBC_MAILBOX_REGISTER_TEST	6	/* Wrap incoming mailboxes */
+#define MBC_VERIFY_CHECKSUM		7	/* Verify checksum. */
+#define MBC_GET_FIRMWARE_VERSION	8	/* Get firmware revision. */
+#define MBC_LOAD_RISC_RAM		9	/* Load RAM command. */
+#define MBC_DUMP_RISC_RAM		0xa	/* Dump RAM command. */
+#define MBC_LOAD_RISC_RAM_EXTENDED	0xb	/* Load RAM extended. */
+#define MBC_DUMP_RISC_RAM_EXTENDED	0xc	/* Dump RAM extended. */
+#define MBC_WRITE_RAM_WORD_EXTENDED	0xd	/* Write RAM word extended */
+#define MBC_READ_RAM_EXTENDED		0xf	/* Read RAM extended. */
+#define MBC_IOCB_COMMAND		0x12	/* Execute IOCB command. */
+#define MBC_STOP_FIRMWARE		0x14	/* Stop firmware. */
+#define MBC_ABORT_COMMAND		0x15	/* Abort IOCB command. */
+#define MBC_ABORT_DEVICE		0x16	/* Abort device (ID/LUN). */
+#define MBC_ABORT_TARGET		0x17	/* Abort target (ID). */
+#define MBC_RESET			0x18	/* Reset. */
+#define MBC_GET_ADAPTER_LOOP_ID		0x20	/* Get loop id of ISP2200. */
+#define MBC_GET_SET_ZIO_THRESHOLD	0x21	/* Get/Set ZIO threshold. */
+#define MBC_GET_RETRY_COUNT		0x22	/* Get f/w retry cnt/delay. */
+#define MBC_DISABLE_VI			0x24	/* Disable VI operation. */
+#define MBC_ENABLE_VI			0x25	/* Enable VI operation. */
+#define MBC_GET_FIRMWARE_OPTION		0x28	/* Get Firmware Options. */
+#define MBC_GET_MEM_OFFLOAD_CNTRL_STAT	0x34	/* Memory Offload ctrl/Stat*/
+#define MBC_SET_FIRMWARE_OPTION		0x38	/* Set Firmware Options. */
+#define MBC_LOOP_PORT_BYPASS		0x40	/* Loop Port Bypass. */
+#define MBC_LOOP_PORT_ENABLE		0x41	/* Loop Port Enable. */
+#define MBC_GET_RESOURCE_COUNTS		0x42	/* Get Resource Counts. */
+#define MBC_NON_PARTICIPATE		0x43	/* Non-Participating Mode. */
+#define MBC_DIAGNOSTIC_ECHO		0x44	/* Diagnostic echo. */
+#define MBC_DIAGNOSTIC_LOOP_BACK	0x45	/* Diagnostic loop back. */
+#define MBC_ONLINE_SELF_TEST		0x46	/* Online self-test. */
+#define MBC_ENHANCED_GET_PORT_DATABASE	0x47	/* Get port database + login */
+#define MBC_CONFIGURE_VF		0x4b	/* Configure VFs */
+#define MBC_RESET_LINK_STATUS		0x52	/* Reset Link Error Status */
+#define MBC_IOCB_COMMAND_A64		0x54	/* Execute IOCB command (64) */
+#define MBC_PORT_LOGOUT			0x56	/* Port Logout request */
+#define MBC_SEND_RNID_ELS		0x57	/* Send RNID ELS request */
+#define MBC_SET_RNID_PARAMS		0x59	/* Set RNID parameters */
+#define MBC_GET_RNID_PARAMS		0x5a	/* Get RNID parameters */
+#define MBC_DATA_RATE			0x5d	/* Data Rate */
+#define MBC_INITIALIZE_FIRMWARE		0x60	/* Initialize firmware */
+#define MBC_INITIATE_LIP		0x62	/* Initiate Loop */
+						/* Initialization Procedure */
+#define MBC_GET_FC_AL_POSITION_MAP	0x63	/* Get FC_AL Position Map. */
+#define MBC_GET_PORT_DATABASE		0x64	/* Get Port Database. */
+#define MBC_CLEAR_ACA			0x65	/* Clear ACA. */
+#define MBC_TARGET_RESET		0x66	/* Target Reset. */
+#define MBC_CLEAR_TASK_SET		0x67	/* Clear Task Set. */
+#define MBC_ABORT_TASK_SET		0x68	/* Abort Task Set. */
+#define MBC_GET_FIRMWARE_STATE		0x69	/* Get firmware state. */
+#define MBC_GET_PORT_NAME		0x6a	/* Get port name. */
+#define MBC_GET_LINK_STATUS		0x6b	/* Get port link status. */
+#define MBC_LIP_RESET			0x6c	/* LIP reset. */
+#define MBC_SEND_SNS_COMMAND		0x6e	/* Send Simple Name Server */
+						/* command. */
+#define MBC_LOGIN_FABRIC_PORT		0x6f	/* Login fabric port. */
+#define MBC_SEND_CHANGE_REQUEST		0x70	/* Send Change Request. */
+#define MBC_LOGOUT_FABRIC_PORT		0x71	/* Logout fabric port. */
+#define MBC_LIP_FULL_LOGIN		0x72	/* Full login LIP. */
+#define MBC_LOGIN_LOOP_PORT		0x74	/* Login Loop Port. */
+#define MBC_PORT_NODE_NAME_LIST		0x75	/* Get port/node name list. */
+#define MBC_INITIALIZE_RECEIVE_QUEUE	0x77	/* Initialize receive queue */
+#define MBC_UNLOAD_IP			0x79	/* Shutdown IP */
+#define MBC_GET_ID_LIST			0x7C	/* Get Port ID list. */
+#define MBC_SEND_LFA_COMMAND		0x7D	/* Send Loop Fabric Address */
+#define MBC_LUN_RESET			0x7E	/* Send LUN reset */
+
+/*
+ * All Mt. Rainier mailbox command codes that clash with FC/FCoE ones
+ * should be defined with an MBC_MR_* prefix.
+ */
+#define MBC_MR_DRV_SHUTDOWN		0x6A
+
+/*
+ * ISP24xx mailbox commands
+ */
+#define MBC_WRITE_SERDES		0x3	/* Write serdes word. */
+#define MBC_READ_SERDES			0x4	/* Read serdes word. */
+#define MBC_LOAD_DUMP_MPI_RAM		0x5	/* Load/Dump MPI RAM. */
+#define MBC_SERDES_PARAMS		0x10	/* Serdes Tx Parameters. */
+#define MBC_GET_IOCB_STATUS		0x12	/* Get IOCB status command. */
+#define MBC_PORT_PARAMS			0x1A	/* Port iDMA Parameters. */
+#define MBC_GET_TIMEOUT_PARAMS		0x22	/* Get FW timeouts. */
+#define MBC_TRACE_CONTROL		0x27	/* Trace control command. */
+#define MBC_GEN_SYSTEM_ERROR		0x2a	/* Generate System Error. */
+#define MBC_WRITE_SFP			0x30	/* Write SFP Data. */
+#define MBC_READ_SFP			0x31	/* Read SFP Data. */
+#define MBC_SET_TIMEOUT_PARAMS		0x32	/* Set FW timeouts. */
+#define MBC_DPORT_DIAGNOSTICS		0x47	/* D-Port Diagnostics */
+#define MBC_MID_INITIALIZE_FIRMWARE	0x48	/* MID Initialize firmware. */
+#define MBC_MID_GET_VP_DATABASE		0x49	/* MID Get VP Database. */
+#define MBC_MID_GET_VP_ENTRY		0x4a	/* MID Get VP Entry. */
+#define MBC_HOST_MEMORY_COPY		0x53	/* Host Memory Copy. */
+#define MBC_SEND_RNFT_ELS		0x5e	/* Send RNFT ELS request */
+#define MBC_GET_LINK_PRIV_STATS		0x6d	/* Get link & private data. */
+#define MBC_LINK_INITIALIZATION		0x72	/* Do link initialization. */
+#define MBC_SET_VENDOR_ID		0x76	/* Set Vendor ID. */
+#define MBC_PORT_RESET			0x120	/* Port Reset */
+#define MBC_SET_PORT_CONFIG		0x122	/* Set port configuration */
+#define MBC_GET_PORT_CONFIG		0x123	/* Get port configuration */
+
+/*
+ * ISP81xx mailbox commands
+ */
+#define MBC_WRITE_MPI_REGISTER		0x01    /* Write MPI Register. */
+
+/*
+ * ISP8044 mailbox commands
+ */
+#define MBC_SET_GET_ETH_SERDES_REG	0x150
+#define HCS_WRITE_SERDES		0x3
+#define HCS_READ_SERDES			0x4
+
+/* Firmware return data sizes */
+#define FCAL_MAP_SIZE	128
+
+/* Mailbox bit definitions for out_mb and in_mb */
+#define	MBX_31		BIT_31
+#define	MBX_30		BIT_30
+#define	MBX_29		BIT_29
+#define	MBX_28		BIT_28
+#define	MBX_27		BIT_27
+#define	MBX_26		BIT_26
+#define	MBX_25		BIT_25
+#define	MBX_24		BIT_24
+#define	MBX_23		BIT_23
+#define	MBX_22		BIT_22
+#define	MBX_21		BIT_21
+#define	MBX_20		BIT_20
+#define	MBX_19		BIT_19
+#define	MBX_18		BIT_18
+#define	MBX_17		BIT_17
+#define	MBX_16		BIT_16
+#define	MBX_15		BIT_15
+#define	MBX_14		BIT_14
+#define	MBX_13		BIT_13
+#define	MBX_12		BIT_12
+#define	MBX_11		BIT_11
+#define	MBX_10		BIT_10
+#define	MBX_9		BIT_9
+#define	MBX_8		BIT_8
+#define	MBX_7		BIT_7
+#define	MBX_6		BIT_6
+#define	MBX_5		BIT_5
+#define	MBX_4		BIT_4
+#define	MBX_3		BIT_3
+#define	MBX_2		BIT_2
+#define	MBX_1		BIT_1
+#define	MBX_0		BIT_0
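+
+/*
+ * Illustrative use (sketch, not part of the baseline): callers of the
+ * mailbox command layer flag which registers they write and expect back
+ * by OR-ing the MBX_* bits into out_mb/in_mb, e.g. for
+ * MBC_GET_FIRMWARE_VERSION:
+ *
+ *	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
+ *	mcp->out_mb = MBX_0;
+ *	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ *
+ * Only the flagged registers are transferred to/from the ISP.
+ */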
+
+#define RNID_TYPE_PORT_LOGIN	0x7
+#define RNID_TYPE_SET_VERSION	0x9
+#define RNID_TYPE_ASIC_TEMP	0xC
+
+/*
+ * Firmware state codes from get firmware state mailbox command
+ */
+#define FSTATE_CONFIG_WAIT      0
+#define FSTATE_WAIT_AL_PA       1
+#define FSTATE_WAIT_LOGIN       2
+#define FSTATE_READY            3
+#define FSTATE_LOSS_OF_SYNC     4
+#define FSTATE_ERROR            5
+#define FSTATE_REINIT           6
+#define FSTATE_NON_PART         7
+
+#define FSTATE_CONFIG_CORRECT      0
+#define FSTATE_P2P_RCV_LIP         1
+#define FSTATE_P2P_CHOOSE_LOOP     2
+#define FSTATE_P2P_RCV_UNIDEN_LIP  3
+#define FSTATE_FATAL_ERROR         4
+#define FSTATE_LOOP_BACK_CONN      5
+
+#define QLA27XX_IMG_STATUS_VER_MAJOR	0x01
+#define QLA27XX_IMG_STATUS_VER_MINOR	0x00
+#define QLA27XX_IMG_STATUS_SIGN		0xFACEFADE
+#define QLA27XX_PRIMARY_IMAGE		1
+#define QLA27XX_SECONDARY_IMAGE		2
+
+/*
+ * Port Database structure definition
+ * Little endian except where noted.
+ */
+#define	PORT_DATABASE_SIZE	128	/* bytes */
+typedef struct {
+	uint8_t options;
+	uint8_t control;
+	uint8_t master_state;
+	uint8_t slave_state;
+	uint8_t reserved[2];
+	uint8_t hard_address;
+	uint8_t reserved_1;
+	uint8_t port_id[4];
+	uint8_t node_name[WWN_SIZE];
+	uint8_t port_name[WWN_SIZE];
+	uint16_t execution_throttle;
+	uint16_t execution_count;
+	uint8_t reset_count;
+	uint8_t reserved_2;
+	uint16_t resource_allocation;
+	uint16_t current_allocation;
+	uint16_t queue_head;
+	uint16_t queue_tail;
+	uint16_t transmit_execution_list_next;
+	uint16_t transmit_execution_list_previous;
+	uint16_t common_features;
+	uint16_t total_concurrent_sequences;
+	uint16_t RO_by_information_category;
+	uint8_t recipient;
+	uint8_t initiator;
+	uint16_t receive_data_size;
+	uint16_t concurrent_sequences;
+	uint16_t open_sequences_per_exchange;
+	uint16_t lun_abort_flags;
+	uint16_t lun_stop_flags;
+	uint16_t stop_queue_head;
+	uint16_t stop_queue_tail;
+	uint16_t port_retry_timer;
+	uint16_t next_sequence_id;
+	uint16_t frame_count;
+	uint16_t PRLI_payload_length;
+	uint8_t prli_svc_param_word_0[2];	/* Big endian */
+						/* Bits 15-0 of word 0 */
+	uint8_t prli_svc_param_word_3[2];	/* Big endian */
+						/* Bits 15-0 of word 3 */
+	uint16_t loop_id;
+	uint16_t extended_lun_info_list_pointer;
+	uint16_t extended_lun_stop_list_pointer;
+} port_database_t;
+
+/*
+ * Port database slave/master states
+ */
+#define PD_STATE_DISCOVERY			0
+#define PD_STATE_WAIT_DISCOVERY_ACK		1
+#define PD_STATE_PORT_LOGIN			2
+#define PD_STATE_WAIT_PORT_LOGIN_ACK		3
+#define PD_STATE_PROCESS_LOGIN			4
+#define PD_STATE_WAIT_PROCESS_LOGIN_ACK		5
+#define PD_STATE_PORT_LOGGED_IN			6
+#define PD_STATE_PORT_UNAVAILABLE		7
+#define PD_STATE_PROCESS_LOGOUT			8
+#define PD_STATE_WAIT_PROCESS_LOGOUT_ACK	9
+#define PD_STATE_PORT_LOGOUT			10
+#define PD_STATE_WAIT_PORT_LOGOUT_ACK		11
+
+
+#define QLA_ZIO_MODE_6		(BIT_2 | BIT_1)
+#define QLA_ZIO_DISABLED	0
+#define QLA_ZIO_DEFAULT_TIMER	2
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define	ICB_VERSION 1
+typedef struct {
+	uint8_t  version;
+	uint8_t  reserved_1;
+
+	/*
+	 * LSB BIT 0  = Enable Hard Loop Id
+	 * LSB BIT 1  = Enable Fairness
+	 * LSB BIT 2  = Enable Full-Duplex
+	 * LSB BIT 3  = Enable Fast Posting
+	 * LSB BIT 4  = Enable Target Mode
+	 * LSB BIT 5  = Disable Initiator Mode
+	 * LSB BIT 6  = Enable ADISC
+	 * LSB BIT 7  = Enable Target Inquiry Data
+	 *
+	 * MSB BIT 0  = Enable PDBC Notify
+	 * MSB BIT 1  = Non Participating LIP
+	 * MSB BIT 2  = Descending Loop ID Search
+	 * MSB BIT 3  = Acquire Loop ID in LIPA
+	 * MSB BIT 4  = Stop PortQ on Full Status
+	 * MSB BIT 5  = Full Login after LIP
+	 * MSB BIT 6  = Node Name Option
+	 * MSB BIT 7  = Ext IFWCB enable bit
+	 */
+	uint8_t  firmware_options[2];
+
+	uint16_t frame_payload_size;
+	uint16_t max_iocb_allocation;
+	uint16_t execution_throttle;
+	uint8_t  retry_count;
+	uint8_t	 retry_delay;			/* unused */
+	uint8_t	 port_name[WWN_SIZE];		/* Big endian. */
+	uint16_t hard_address;
+	uint8_t	 inquiry_data;
+	uint8_t	 login_timeout;
+	uint8_t	 node_name[WWN_SIZE];		/* Big endian. */
+
+	uint16_t request_q_outpointer;
+	uint16_t response_q_inpointer;
+	uint16_t request_q_length;
+	uint16_t response_q_length;
+	uint32_t request_q_address[2];
+	uint32_t response_q_address[2];
+
+	uint16_t lun_enables;
+	uint8_t  command_resource_count;
+	uint8_t  immediate_notify_resource_count;
+	uint16_t timeout;
+	uint8_t  reserved_2[2];
+
+	/*
+	 * LSB BIT 0 = Timer Operation mode bit 0
+	 * LSB BIT 1 = Timer Operation mode bit 1
+	 * LSB BIT 2 = Timer Operation mode bit 2
+	 * LSB BIT 3 = Timer Operation mode bit 3
+	 * LSB BIT 4 = Init Config Mode bit 0
+	 * LSB BIT 5 = Init Config Mode bit 1
+	 * LSB BIT 6 = Init Config Mode bit 2
+	 * LSB BIT 7 = Enable Non part on LIHA failure
+	 *
+	 * MSB BIT 0 = Enable class 2
+	 * MSB BIT 1 = Enable ACK0
+	 * MSB BIT 2 =
+	 * MSB BIT 3 =
+	 * MSB BIT 4 = FC Tape Enable
+	 * MSB BIT 5 = Enable FC Confirm
+	 * MSB BIT 6 = Enable command queuing in target mode
+	 * MSB BIT 7 = No Logo On Link Down
+	 */
+	uint8_t	 add_firmware_options[2];
+
+	uint8_t	 response_accumulation_timer;
+	uint8_t	 interrupt_delay_timer;
+
+	/*
+	 * LSB BIT 0 = Enable Read xfr_rdy
+	 * LSB BIT 1 = Soft ID only
+	 * LSB BIT 2 =
+	 * LSB BIT 3 =
+	 * LSB BIT 4 = FCP RSP Payload [0]
+	 * LSB BIT 5 = FCP RSP Payload [1] / Sbus enable - 2200
+	 * LSB BIT 6 = Enable Out-of-Order frame handling
+	 * LSB BIT 7 = Disable Automatic PLOGI on Local Loop
+	 *
+	 * MSB BIT 0 = Sbus enable - 2300
+	 * MSB BIT 1 =
+	 * MSB BIT 2 =
+	 * MSB BIT 3 =
+	 * MSB BIT 4 = LED mode
+	 * MSB BIT 5 = enable 50 ohm termination
+	 * MSB BIT 6 = Data Rate (2300 only)
+	 * MSB BIT 7 = Data Rate (2300 only)
+	 */
+	uint8_t	 special_options[2];
+
+	uint8_t  reserved_3[26];
+} init_cb_t;
+
+/*
+ * Get Link Status mailbox command return buffer.
+ */
+#define GLSO_SEND_RPS	BIT_0
+#define GLSO_USE_DID	BIT_3
+
+struct link_statistics {
+	uint32_t link_fail_cnt;
+	uint32_t loss_sync_cnt;
+	uint32_t loss_sig_cnt;
+	uint32_t prim_seq_err_cnt;
+	uint32_t inval_xmit_word_cnt;
+	uint32_t inval_crc_cnt;
+	uint32_t lip_cnt;
+	uint32_t link_up_cnt;
+	uint32_t link_down_loop_init_tmo;
+	uint32_t link_down_los;
+	uint32_t link_down_loss_rcv_clk;
+	uint32_t reserved0[5];
+	uint32_t port_cfg_chg;
+	uint32_t reserved1[11];
+	uint32_t rsp_q_full;
+	uint32_t atio_q_full;
+	uint32_t drop_ae;
+	uint32_t els_proto_err;
+	uint32_t reserved2;
+	uint32_t tx_frames;
+	uint32_t rx_frames;
+	uint32_t discarded_frames;
+	uint32_t dropped_frames;
+	uint32_t reserved3;
+	uint32_t nos_rcvd;
+	uint32_t reserved4[4];
+	uint32_t tx_prjt;
+	uint32_t rcv_exfail;
+	uint32_t rcv_abts;
+	uint32_t seq_frm_miss;
+	uint32_t corr_err;
+	uint32_t mb_rqst;
+	uint32_t nport_full;
+	uint32_t eofa;
+	uint32_t reserved5;
+	uint32_t fpm_recv_word_cnt_lo;
+	uint32_t fpm_recv_word_cnt_hi;
+	uint32_t fpm_disc_word_cnt_lo;
+	uint32_t fpm_disc_word_cnt_hi;
+	uint32_t fpm_xmit_word_cnt_lo;
+	uint32_t fpm_xmit_word_cnt_hi;
+	uint32_t reserved6[70];
+};
+
+/*
+ * NVRAM Command values.
+ */
+#define NV_START_BIT            BIT_2
+#define NV_WRITE_OP             (BIT_26+BIT_24)
+#define NV_READ_OP              (BIT_26+BIT_25)
+#define NV_ERASE_OP             (BIT_26+BIT_25+BIT_24)
+#define NV_MASK_OP              (BIT_26+BIT_25+BIT_24)
+#define NV_DELAY_COUNT          10
+
+/*
+ * QLogic ISP2100, ISP2200 and ISP2300 NVRAM structure definition.
+ */
+typedef struct {
+	/*
+	 * NVRAM header
+	 */
+	uint8_t	id[4];
+	uint8_t	nvram_version;
+	uint8_t	reserved_0;
+
+	/*
+	 * NVRAM RISC parameter block
+	 */
+	uint8_t	parameter_block_version;
+	uint8_t	reserved_1;
+
+	/*
+	 * LSB BIT 0  = Enable Hard Loop Id
+	 * LSB BIT 1  = Enable Fairness
+	 * LSB BIT 2  = Enable Full-Duplex
+	 * LSB BIT 3  = Enable Fast Posting
+	 * LSB BIT 4  = Enable Target Mode
+	 * LSB BIT 5  = Disable Initiator Mode
+	 * LSB BIT 6  = Enable ADISC
+	 * LSB BIT 7  = Enable Target Inquiry Data
+	 *
+	 * MSB BIT 0  = Enable PDBC Notify
+	 * MSB BIT 1  = Non Participating LIP
+	 * MSB BIT 2  = Descending Loop ID Search
+	 * MSB BIT 3  = Acquire Loop ID in LIPA
+	 * MSB BIT 4  = Stop PortQ on Full Status
+	 * MSB BIT 5  = Full Login after LIP
+	 * MSB BIT 6  = Node Name Option
+	 * MSB BIT 7  = Ext IFWCB enable bit
+	 */
+	uint8_t	 firmware_options[2];
+
+	uint16_t frame_payload_size;
+	uint16_t max_iocb_allocation;
+	uint16_t execution_throttle;
+	uint8_t	 retry_count;
+	uint8_t	 retry_delay;			/* unused */
+	uint8_t	 port_name[WWN_SIZE];		/* Big endian. */
+	uint16_t hard_address;
+	uint8_t	 inquiry_data;
+	uint8_t	 login_timeout;
+	uint8_t	 node_name[WWN_SIZE];		/* Big endian. */
+
+	/*
+	 * LSB BIT 0 = Timer Operation mode bit 0
+	 * LSB BIT 1 = Timer Operation mode bit 1
+	 * LSB BIT 2 = Timer Operation mode bit 2
+	 * LSB BIT 3 = Timer Operation mode bit 3
+	 * LSB BIT 4 = Init Config Mode bit 0
+	 * LSB BIT 5 = Init Config Mode bit 1
+	 * LSB BIT 6 = Init Config Mode bit 2
+	 * LSB BIT 7 = Enable Non part on LIHA failure
+	 *
+	 * MSB BIT 0 = Enable class 2
+	 * MSB BIT 1 = Enable ACK0
+	 * MSB BIT 2 =
+	 * MSB BIT 3 =
+	 * MSB BIT 4 = FC Tape Enable
+	 * MSB BIT 5 = Enable FC Confirm
+	 * MSB BIT 6 = Enable command queuing in target mode
+	 * MSB BIT 7 = No Logo On Link Down
+	 */
+	uint8_t	 add_firmware_options[2];
+
+	uint8_t	 response_accumulation_timer;
+	uint8_t	 interrupt_delay_timer;
+
+	/*
+	 * LSB BIT 0 = Enable Read xfr_rdy
+	 * LSB BIT 1 = Soft ID only
+	 * LSB BIT 2 =
+	 * LSB BIT 3 =
+	 * LSB BIT 4 = FCP RSP Payload [0]
+	 * LSB BIT 5 = FCP RSP Payload [1] / Sbus enable - 2200
+	 * LSB BIT 6 = Enable Out-of-Order frame handling
+	 * LSB BIT 7 = Disable Automatic PLOGI on Local Loop
+	 *
+	 * MSB BIT 0 = Sbus enable - 2300
+	 * MSB BIT 1 =
+	 * MSB BIT 2 =
+	 * MSB BIT 3 =
+	 * MSB BIT 4 = LED mode
+	 * MSB BIT 5 = enable 50 ohm termination
+	 * MSB BIT 6 = Data Rate (2300 only)
+	 * MSB BIT 7 = Data Rate (2300 only)
+	 */
+	uint8_t	 special_options[2];
+
+	/* Reserved for expanded RISC parameter block */
+	uint8_t reserved_2[22];
+
+	/*
+	 * LSB BIT 0 = Tx Sensitivity 1G bit 0
+	 * LSB BIT 1 = Tx Sensitivity 1G bit 1
+	 * LSB BIT 2 = Tx Sensitivity 1G bit 2
+	 * LSB BIT 3 = Tx Sensitivity 1G bit 3
+	 * LSB BIT 4 = Rx Sensitivity 1G bit 0
+	 * LSB BIT 5 = Rx Sensitivity 1G bit 1
+	 * LSB BIT 6 = Rx Sensitivity 1G bit 2
+	 * LSB BIT 7 = Rx Sensitivity 1G bit 3
+	 *
+	 * MSB BIT 0 = Tx Sensitivity 2G bit 0
+	 * MSB BIT 1 = Tx Sensitivity 2G bit 1
+	 * MSB BIT 2 = Tx Sensitivity 2G bit 2
+	 * MSB BIT 3 = Tx Sensitivity 2G bit 3
+	 * MSB BIT 4 = Rx Sensitivity 2G bit 0
+	 * MSB BIT 5 = Rx Sensitivity 2G bit 1
+	 * MSB BIT 6 = Rx Sensitivity 2G bit 2
+	 * MSB BIT 7 = Rx Sensitivity 2G bit 3
+	 *
+	 * LSB BIT 0 = Output Swing 1G bit 0
+	 * LSB BIT 1 = Output Swing 1G bit 1
+	 * LSB BIT 2 = Output Swing 1G bit 2
+	 * LSB BIT 3 = Output Emphasis 1G bit 0
+	 * LSB BIT 4 = Output Emphasis 1G bit 1
+	 * LSB BIT 5 = Output Swing 2G bit 0
+	 * LSB BIT 6 = Output Swing 2G bit 1
+	 * LSB BIT 7 = Output Swing 2G bit 2
+	 *
+	 * MSB BIT 0 = Output Emphasis 2G bit 0
+	 * MSB BIT 1 = Output Emphasis 2G bit 1
+	 * MSB BIT 2 = Output Enable
+	 * MSB BIT 3 =
+	 * MSB BIT 4 =
+	 * MSB BIT 5 =
+	 * MSB BIT 6 =
+	 * MSB BIT 7 =
+	 */
+	uint8_t seriallink_options[4];
+
+	/*
+	 * NVRAM host parameter block
+	 *
+	 * LSB BIT 0 = Enable spinup delay
+	 * LSB BIT 1 = Disable BIOS
+	 * LSB BIT 2 = Enable Memory Map BIOS
+	 * LSB BIT 3 = Enable Selectable Boot
+	 * LSB BIT 4 = Disable RISC code load
+	 * LSB BIT 5 = Set cache line size 1
+	 * LSB BIT 6 = PCI Parity Disable
+	 * LSB BIT 7 = Enable extended logging
+	 *
+	 * MSB BIT 0 = Enable 64bit addressing
+	 * MSB BIT 1 = Enable lip reset
+	 * MSB BIT 2 = Enable lip full login
+	 * MSB BIT 3 = Enable target reset
+	 * MSB BIT 4 = Enable database storage
+	 * MSB BIT 5 = Enable cache flush read
+	 * MSB BIT 6 = Enable database load
+	 * MSB BIT 7 = Enable alternate WWN
+	 */
+	uint8_t host_p[2];
+
+	uint8_t boot_node_name[WWN_SIZE];
+	uint8_t boot_lun_number;
+	uint8_t reset_delay;
+	uint8_t port_down_retry_count;
+	uint8_t boot_id_number;
+	uint16_t max_luns_per_target;
+	uint8_t fcode_boot_port_name[WWN_SIZE];
+	uint8_t alternate_port_name[WWN_SIZE];
+	uint8_t alternate_node_name[WWN_SIZE];
+
+	/*
+	 * BIT 0 = Selective Login
+	 * BIT 1 = Alt-Boot Enable
+	 * BIT 2 =
+	 * BIT 3 = Boot Order List
+	 * BIT 4 =
+	 * BIT 5 = Selective LUN
+	 * BIT 6 =
+	 * BIT 7 = unused
+	 */
+	uint8_t efi_parameters;
+
+	uint8_t link_down_timeout;
+
+	uint8_t adapter_id[16];
+
+	uint8_t alt1_boot_node_name[WWN_SIZE];
+	uint16_t alt1_boot_lun_number;
+	uint8_t alt2_boot_node_name[WWN_SIZE];
+	uint16_t alt2_boot_lun_number;
+	uint8_t alt3_boot_node_name[WWN_SIZE];
+	uint16_t alt3_boot_lun_number;
+	uint8_t alt4_boot_node_name[WWN_SIZE];
+	uint16_t alt4_boot_lun_number;
+	uint8_t alt5_boot_node_name[WWN_SIZE];
+	uint16_t alt5_boot_lun_number;
+	uint8_t alt6_boot_node_name[WWN_SIZE];
+	uint16_t alt6_boot_lun_number;
+	uint8_t alt7_boot_node_name[WWN_SIZE];
+	uint16_t alt7_boot_lun_number;
+
+	uint8_t reserved_3[2];
+
+	/* Offset 200-215 : Model Number */
+	uint8_t model_number[16];
+
+	/* OEM related items */
+	uint8_t oem_specific[16];
+
+	/*
+	 * NVRAM Adapter Features offset 232-239
+	 *
+	 * LSB BIT 0 = External GBIC
+	 * LSB BIT 1 = Risc RAM parity
+	 * LSB BIT 2 = Buffer Plus Module
+	 * LSB BIT 3 = Multi Chip Adapter
+	 * LSB BIT 4 = Internal connector
+	 * LSB BIT 5 =
+	 * LSB BIT 6 =
+	 * LSB BIT 7 =
+	 *
+	 * MSB BIT 0 =
+	 * MSB BIT 1 =
+	 * MSB BIT 2 =
+	 * MSB BIT 3 =
+	 * MSB BIT 4 =
+	 * MSB BIT 5 =
+	 * MSB BIT 6 =
+	 * MSB BIT 7 =
+	 */
+	uint8_t	adapter_features[2];
+
+	uint8_t reserved_4[16];
+
+	/* Subsystem vendor ID for ISP2200 */
+	uint16_t subsystem_vendor_id_2200;
+
+	/* Subsystem device ID for ISP2200 */
+	uint16_t subsystem_device_id_2200;
+
+	uint8_t	 reserved_5;
+	uint8_t	 checksum;
+} nvram_t;
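+
+/*
+ * Illustrative integrity check (sketch, not part of the baseline): the
+ * trailing checksum byte is programmed so that all bytes of the NVRAM
+ * image sum to zero, which a reader can verify with:
+ *
+ *	uint8_t chksum = 0, *p = (uint8_t *)nv;
+ *	int i;
+ *
+ *	for (i = 0; i < sizeof(nvram_t); i++)
+ *		chksum += *p++;
+ *	if (chksum)
+ *		... contents invalid, fall back to defaults ...
+ */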
+
+/*
+ * ISP queue - response queue entry definition.
+ */
+typedef struct {
+	uint8_t		entry_type;		/* Entry type. */
+	uint8_t		entry_count;		/* Entry count. */
+	uint8_t		sys_define;		/* System defined. */
+	uint8_t		entry_status;		/* Entry Status. */
+	uint32_t	handle;			/* System defined handle */
+	uint8_t		data[52];
+	uint32_t	signature;
+#define RESPONSE_PROCESSED	0xDEADDEAD	/* Signature */
+} response_t;
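+
+/*
+ * Illustrative use (sketch): after servicing a response entry the driver
+ * stamps it so stale entries can be recognized when the ring wraps:
+ *
+ *	((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+ */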
+
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+	uint8_t		entry_type;		/* Entry type. */
+	uint8_t		entry_count;		/* Entry count. */
+	__le16		attr_n_length;
+	uint8_t		data[56];
+	uint32_t	signature;
+#define ATIO_PROCESSED 0xDEADDEAD		/* Signature */
+};
+
+typedef union {
+	uint16_t extended;
+	struct {
+		uint8_t reserved;
+		uint8_t standard;
+	} id;
+} target_id_t;
+
+#define SET_TARGET_ID(ha, to, from)			\
+do {							\
+	if (HAS_EXTENDED_IDS(ha))			\
+		to.extended = cpu_to_le16(from);	\
+	else						\
+		to.id.standard = (uint8_t)from;		\
+} while (0)
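+
+/*
+ * Example (illustrative): filling the target field of a command entry so
+ * it works on both extended- and standard-ID adapters:
+ *
+ *	SET_TARGET_ID(ha, cmd_pkt->target, fcport->loop_id);
+ *
+ * With extended IDs the full 16-bit loop ID is stored little endian;
+ * otherwise only the low byte is significant.
+ */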
+
+/*
+ * ISP queue - command entry structure definition.
+ */
+#define COMMAND_TYPE	0x11		/* Command entry */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t handle;		/* System handle. */
+	target_id_t target;		/* SCSI ID */
+	uint16_t lun;			/* SCSI LUN */
+	uint16_t control_flags;		/* Control flags. */
+#define CF_WRITE	BIT_6
+#define CF_READ		BIT_5
+#define CF_SIMPLE_TAG	BIT_3
+#define CF_ORDERED_TAG	BIT_2
+#define CF_HEAD_TAG	BIT_1
+	uint16_t reserved_1;
+	uint16_t timeout;		/* Command timeout. */
+	uint16_t dseg_count;		/* Data segment count. */
+	uint8_t scsi_cdb[MAX_CMDSZ];	/* SCSI command words. */
+	uint32_t byte_count;		/* Total byte count. */
+	uint32_t dseg_0_address;	/* Data segment 0 address. */
+	uint32_t dseg_0_length;		/* Data segment 0 length. */
+	uint32_t dseg_1_address;	/* Data segment 1 address. */
+	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	uint32_t dseg_2_address;	/* Data segment 2 address. */
+	uint32_t dseg_2_length;		/* Data segment 2 length. */
+} cmd_entry_t;
+
+/*
+ * ISP queue - 64-Bit addressing, command entry structure definition.
+ */
+#define COMMAND_A64_TYPE	0x19	/* Command A64 entry */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t handle;		/* System handle. */
+	target_id_t target;		/* SCSI ID */
+	uint16_t lun;			/* SCSI LUN */
+	uint16_t control_flags;		/* Control flags. */
+	uint16_t reserved_1;
+	uint16_t timeout;		/* Command timeout. */
+	uint16_t dseg_count;		/* Data segment count. */
+	uint8_t scsi_cdb[MAX_CMDSZ];	/* SCSI command words. */
+	uint32_t byte_count;		/* Total byte count. */
+	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_0_length;		/* Data segment 0 length. */
+	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
+	uint32_t dseg_1_length;		/* Data segment 1 length. */
+} cmd_a64_entry_t, request_t;
+
+/*
+ * ISP queue - continuation entry structure definition.
+ */
+#define CONTINUE_TYPE		0x02	/* Continuation entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t reserved;
+	uint32_t dseg_0_address;	/* Data segment 0 address. */
+	uint32_t dseg_0_length;		/* Data segment 0 length. */
+	uint32_t dseg_1_address;	/* Data segment 1 address. */
+	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	uint32_t dseg_2_address;	/* Data segment 2 address. */
+	uint32_t dseg_2_length;		/* Data segment 2 length. */
+	uint32_t dseg_3_address;	/* Data segment 3 address. */
+	uint32_t dseg_3_length;		/* Data segment 3 length. */
+	uint32_t dseg_4_address;	/* Data segment 4 address. */
+	uint32_t dseg_4_length;		/* Data segment 4 length. */
+	uint32_t dseg_5_address;	/* Data segment 5 address. */
+	uint32_t dseg_5_length;		/* Data segment 5 length. */
+	uint32_t dseg_6_address;	/* Data segment 6 address. */
+	uint32_t dseg_6_length;		/* Data segment 6 length. */
+} cont_entry_t;
+
+/*
+ * ISP queue - 64-Bit addressing, continuation entry structure definition.
+ */
+#define CONTINUE_A64_TYPE	0x0A	/* Continuation A64 entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_0_length;		/* Data segment 0 length. */
+	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
+	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	uint32_t dseg_2_address[2];	/* Data segment 2 address. */
+	uint32_t dseg_2_length;		/* Data segment 2 length. */
+	uint32_t dseg_3_address[2];	/* Data segment 3 address. */
+	uint32_t dseg_3_length;		/* Data segment 3 length. */
+	uint32_t dseg_4_address[2];	/* Data segment 4 address. */
+	uint32_t dseg_4_length;		/* Data segment 4 length. */
+} cont_a64_entry_t;
+
+#define PO_MODE_DIF_INSERT	0
+#define PO_MODE_DIF_REMOVE	1
+#define PO_MODE_DIF_PASS	2
+#define PO_MODE_DIF_REPLACE	3
+#define PO_MODE_DIF_TCP_CKSUM	6
+#define PO_ENABLE_INCR_GUARD_SEED	BIT_3
+#define PO_DISABLE_GUARD_CHECK	BIT_4
+#define PO_DISABLE_INCR_REF_TAG	BIT_5
+#define PO_DIS_HEADER_MODE	BIT_7
+#define PO_ENABLE_DIF_BUNDLING	BIT_8
+#define PO_DIS_FRAME_MODE	BIT_9
+#define PO_DIS_VALD_APP_ESC	BIT_10 /* Dis validation for escape tag/ffffh */
+#define PO_DIS_VALD_APP_REF_ESC BIT_11
+
+#define PO_DIS_APP_TAG_REPL	BIT_12 /* disable App Tag replacement */
+#define PO_DIS_REF_TAG_REPL	BIT_13
+#define PO_DIS_APP_TAG_VALD	BIT_14 /* disable App Tag validation */
+#define PO_DIS_REF_TAG_VALD	BIT_15
+
+/*
+ * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
+ */
+struct crc_context {
+	uint32_t handle;		/* System handle. */
+	__le32 ref_tag;
+	__le16 app_tag;
+	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
+	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
+	__le16 guard_seed;		/* Initial Guard Seed */
+	__le16 prot_opts;		/* Requested Data Protection Mode */
+	__le16 blk_size;		/* Data size in bytes */
+	uint16_t runt_blk_guard;	/* Guard value for runt block (tape
+					 * only) */
+	__le32 byte_count;		/* Total byte count/ total data
+					 * transfer count */
+	union {
+		struct {
+			uint32_t	reserved_1;
+			uint16_t	reserved_2;
+			uint16_t	reserved_3;
+			uint32_t	reserved_4;
+			uint32_t	data_address[2];
+			uint32_t	data_length;
+			uint32_t	reserved_5[2];
+			uint32_t	reserved_6;
+		} nobundling;
+		struct {
+			__le32	dif_byte_count;	/* Total DIF byte
+							 * count */
+			uint16_t	reserved_1;
+			__le16	dseg_count;	/* Data segment count */
+			uint32_t	reserved_2;
+			uint32_t	data_address[2];
+			uint32_t	data_length;
+			uint32_t	dif_address[2];
+			uint32_t	dif_length;	/* Data segment 0
+							 * length */
+		} bundling;
+	} u;
+
+	struct fcp_cmnd	fcp_cmnd;
+	dma_addr_t	crc_ctx_dma;
+	/* List of DMA context transfers */
+	struct list_head dsd_list;
+
+	/* This structure should not exceed 512 bytes */
+};
+
+#define CRC_CONTEXT_LEN_FW	(offsetof(struct crc_context, fcp_cmnd.lun))
+#define CRC_CONTEXT_FCPCMND_OFF	(offsetof(struct crc_context, fcp_cmnd.lun))
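+
+/*
+ * A minimal compile-time guard for the size note above (sketch; it must
+ * live in function scope since BUILD_BUG_ON expands to code):
+ *
+ *	BUILD_BUG_ON(sizeof(struct crc_context) > 512);
+ */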
+
+/*
+ * ISP queue - status entry structure definition.
+ */
+#define	STATUS_TYPE	0x03		/* Status entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t handle;		/* System handle. */
+	uint16_t scsi_status;		/* SCSI status. */
+	uint16_t comp_status;		/* Completion status. */
+	uint16_t state_flags;		/* State flags. */
+	uint16_t status_flags;		/* Status flags. */
+	uint16_t rsp_info_len;		/* Response Info Length. */
+	uint16_t req_sense_length;	/* Request sense data length. */
+	uint32_t residual_length;	/* Residual transfer length. */
+	uint8_t rsp_info[8];		/* FCP response information. */
+	uint8_t req_sense_data[32];	/* Request sense data. */
+} sts_entry_t;
+
+/*
+ * Status entry entry status
+ */
+#define RF_RQ_DMA_ERROR	BIT_6		/* Request Queue DMA error. */
+#define RF_INV_E_ORDER	BIT_5		/* Invalid entry order. */
+#define RF_INV_E_COUNT	BIT_4		/* Invalid entry count. */
+#define RF_INV_E_PARAM	BIT_3		/* Invalid entry parameter. */
+#define RF_INV_E_TYPE	BIT_2		/* Invalid entry type. */
+#define RF_BUSY		BIT_1		/* Busy */
+#define RF_MASK		(RF_RQ_DMA_ERROR | RF_INV_E_ORDER | RF_INV_E_COUNT | \
+			 RF_INV_E_PARAM | RF_INV_E_TYPE | RF_BUSY)
+#define RF_MASK_24XX	(RF_INV_E_ORDER | RF_INV_E_COUNT | RF_INV_E_PARAM | \
+			 RF_INV_E_TYPE)
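+
+/*
+ * Illustrative check (sketch): a response entry whose entry_status has
+ * any bit of the generation-appropriate mask set is treated as an error
+ * entry rather than a normal completion, e.g.
+ *
+ *	if (pkt->entry_status & RF_MASK_24XX)
+ *		... route the entry to the error handler ...
+ */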
+
+/*
+ * Status entry SCSI status bit definitions.
+ */
+#define SS_MASK				0xfff	/* Reserved bits BIT_12-BIT_15. */
+#define SS_RESIDUAL_UNDER		BIT_11
+#define SS_RESIDUAL_OVER		BIT_10
+#define SS_SENSE_LEN_VALID		BIT_9
+#define SS_RESPONSE_INFO_LEN_VALID	BIT_8
+#define SS_SCSI_STATUS_BYTE	0xff
+
+#define SS_RESERVE_CONFLICT		(BIT_4 | BIT_3)
+#define SS_BUSY_CONDITION		BIT_3
+#define SS_CONDITION_MET		BIT_2
+#define SS_CHECK_CONDITION		BIT_1
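+
+/*
+ * Illustrative decode (sketch): the low byte carries the SCSI status
+ * value while the flag bits qualify the residual and sense fields:
+ *
+ *	scsi_status = le16_to_cpu(sts->scsi_status);
+ *	if (scsi_status & SS_SENSE_LEN_VALID)
+ *		sense_len = le16_to_cpu(sts->req_sense_length);
+ *	status_byte = scsi_status & SS_SCSI_STATUS_BYTE;
+ */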
+
+/*
+ * Status entry completion status
+ */
+#define CS_COMPLETE		0x0	/* No errors */
+#define CS_INCOMPLETE		0x1	/* Incomplete transfer of cmd. */
+#define CS_DMA			0x2	/* A DMA direction error. */
+#define CS_TRANSPORT		0x3	/* Transport error. */
+#define CS_RESET		0x4	/* SCSI bus reset occurred */
+#define CS_ABORTED		0x5	/* System aborted command. */
+#define CS_TIMEOUT		0x6	/* Timeout error. */
+#define CS_DATA_OVERRUN		0x7	/* Data overrun. */
+#define CS_DIF_ERROR		0xC	/* DIF error detected  */
+
+#define CS_DATA_UNDERRUN	0x15	/* Data Underrun. */
+#define CS_QUEUE_FULL		0x1C	/* Queue Full. */
+#define CS_PORT_UNAVAILABLE	0x28	/* Port unavailable */
+					/* (selection timeout) */
+#define CS_PORT_LOGGED_OUT	0x29	/* Port Logged Out */
+#define CS_PORT_CONFIG_CHG	0x2A	/* Port Configuration Changed */
+#define CS_PORT_BUSY		0x2B	/* Port Busy */
+#define CS_COMPLETE_CHKCOND	0x30	/* Completed with check condition. */
+#define CS_IOCB_ERROR		0x31	/* Generic error for IOCB request
+					   failure */
+#define CS_BAD_PAYLOAD		0x80	/* Driver defined */
+#define CS_UNKNOWN		0x81	/* Driver defined */
+#define CS_RETRY		0x82	/* Driver defined */
+#define CS_LOOP_DOWN_ABORT	0x83	/* Driver defined */
+
+#define CS_BIDIR_RD_OVERRUN			0x700
+#define CS_BIDIR_RD_WR_OVERRUN			0x707
+#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN		0x715
+#define CS_BIDIR_RD_UNDERRUN			0x1500
+#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN		0x1507
+#define CS_BIDIR_RD_WR_UNDERRUN			0x1515
+#define CS_BIDIR_DMA				0x200
+/*
+ * Status entry status flags
+ */
+#define SF_ABTS_TERMINATED	BIT_10
+#define SF_LOGOUT_SENT		BIT_13
+
+/*
+ * ISP queue - status continuation entry structure definition.
+ */
+#define	STATUS_CONT_TYPE	0x10	/* Status continuation entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint8_t data[60];		/* data */
+} sts_cont_entry_t;
+
+/*
+ * ISP queue -	RIO Type 1 status entry (32 bit I/O entry handles)
+ *		structure definition.
+ */
+#define	STATUS_TYPE_21 0x21		/* Status entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t handle[15];		/* System handles. */
+} sts21_entry_t;
+
+/*
+ * ISP queue -	RIO Type 2 status entry (16 bit I/O entry handles)
+ *		structure definition.
+ */
+#define	STATUS_TYPE_22	0x22		/* Status entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint16_t handle[30];		/* System handles. */
+} sts22_entry_t;
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+#define MARKER_TYPE	0x04		/* Marker entry. */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t sys_define_2;		/* System defined. */
+	target_id_t target;		/* SCSI ID */
+	uint8_t modifier;		/* Modifier (7-0). */
+#define MK_SYNC_ID_LUN	0		/* Synchronize ID/LUN */
+#define MK_SYNC_ID	1		/* Synchronize ID */
+#define MK_SYNC_ALL	2		/* Synchronize all ID/LUN */
+#define MK_SYNC_LIP	3		/* Synchronize all ID/LUN, */
+					/* clear port changed, */
+					/* use sequence number. */
+	uint8_t reserved_1;
+	uint16_t sequence_number;	/* Sequence number of event */
+	uint16_t lun;			/* SCSI LUN */
+	uint8_t reserved_2[48];
+} mrk_entry_t;
+
+/*
+ * ISP queue - Management Server entry structure definition.
+ */
+#define MS_IOCB_TYPE		0x29	/* Management Server IOCB entry */
+typedef struct {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t handle1;		/* System handle. */
+	target_id_t loop_id;
+	uint16_t status;
+	uint16_t control_flags;		/* Control flags. */
+	uint16_t reserved2;
+	uint16_t timeout;
+	uint16_t cmd_dsd_count;
+	uint16_t total_dsd_count;
+	uint8_t type;
+	uint8_t r_ctl;
+	uint16_t rx_id;
+	uint16_t reserved3;
+	uint32_t handle2;
+	uint32_t rsp_bytecount;
+	uint32_t req_bytecount;
+	uint32_t dseg_req_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_req_length;	/* Data segment 0 length. */
+	uint32_t dseg_rsp_address[2];	/* Data segment 1 address. */
+	uint32_t dseg_rsp_length;	/* Data segment 1 length. */
+} ms_iocb_entry_t;
+
+
+/*
+ * ISP queue - Mailbox Command entry structure definition.
+ */
+#define MBX_IOCB_TYPE	0x39
+struct mbx_entry {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_define1;
+	/* Use sys_define1 for source type */
+#define SOURCE_SCSI	0x00
+#define SOURCE_IP	0x01
+#define SOURCE_VI	0x02
+#define SOURCE_SCTP	0x03
+#define SOURCE_MP	0x04
+#define SOURCE_MPIOCTL	0x05
+#define SOURCE_ASYNC_IOCB 0x07
+
+	uint8_t entry_status;
+
+	uint32_t handle;
+	target_id_t loop_id;
+
+	uint16_t status;
+	uint16_t state_flags;
+	uint16_t status_flags;
+
+	uint32_t sys_define2[2];
+
+	uint16_t mb0;
+	uint16_t mb1;
+	uint16_t mb2;
+	uint16_t mb3;
+	uint16_t mb6;
+	uint16_t mb7;
+	uint16_t mb9;
+	uint16_t mb10;
+	uint32_t reserved_2[2];
+	uint8_t node_name[WWN_SIZE];
+	uint8_t port_name[WWN_SIZE];
+};
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D		/* Immediate notify entry. */
+/*
+ * ISP queue -	immediate notify entry structure definition.
+ *		This is sent by the ISP to the target driver.
+ *		This IOCB carries reports of events from the
+ *		initiator that the target driver must handle
+ *		immediately.
+ */
+struct imm_ntfy_from_isp {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	union {
+		struct {
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint16_t lun;
+			uint8_t  target_id;
+			uint8_t  reserved_1;
+			uint16_t status_modifier;
+			uint16_t status;
+			uint16_t task_flags;
+			uint16_t seq_id;
+			uint16_t srr_rx_id;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+#define SRR_IU_DATA_IN	0x1
+#define SRR_IU_DATA_OUT	0x5
+#define SRR_IU_STATUS	0x7
+			uint16_t srr_ox_id;
+			uint8_t reserved_2[28];
+		} isp2x;
+		struct {
+			uint32_t reserved;
+			uint16_t nport_handle;
+			uint16_t reserved_2;
+			uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO   BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB     BIT_0
+			uint16_t srr_rx_id;
+			uint16_t status;
+			uint8_t  status_subcode;
+			uint8_t  fw_handle;
+			uint32_t exchange_address;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+			uint16_t srr_ox_id;
+			union {
+				struct {
+					uint8_t node_name[8];
+				} plogi; /* PLOGI/ADISC/PDISC */
+				struct {
+					/* PRLI word 3 bit 0-15 */
+					uint16_t wd3_lo;
+					uint8_t resv0[6];
+				} prli;
+				struct {
+					uint8_t port_id[3];
+					uint8_t resv1;
+					uint16_t nport_handle;
+					uint16_t resv2;
+				} req_els;
+			} u;
+			uint8_t port_name[8];
+			uint8_t resv3[3];
+			uint8_t  vp_index;
+			uint32_t reserved_5;
+			uint8_t  port_id[3];
+			uint8_t  reserved_6;
+		} isp24;
+	} u;
+	uint16_t reserved_7;
+	uint16_t ox_id;
+} __packed;
+#endif
+
+/*
+ * ISP request and response queue entry sizes
+ */
+#define RESPONSE_ENTRY_SIZE	(sizeof(response_t))
+#define REQUEST_ENTRY_SIZE	(sizeof(request_t))
+
+
+
+/*
+ * Switch info gathering structure.
+ */
+typedef struct {
+	port_id_t d_id;
+	uint8_t node_name[WWN_SIZE];
+	uint8_t port_name[WWN_SIZE];
+	uint8_t fabric_port_name[WWN_SIZE];
+	uint16_t fp_speed;
+	uint8_t fc4_type;
+	uint8_t fc4f_nvme;	/* nvme fc4 feature bits */
+} sw_info_t;
+
+/* FCP-4 types */
+#define FC4_TYPE_FCP_SCSI	0x08
+#define FC4_TYPE_OTHER		0x0
+#define FC4_TYPE_UNKNOWN	0xff
+
+/* mailbox command 4G & above */
+struct mbx_24xx_entry {
+	uint8_t		entry_type;
+	uint8_t		entry_count;
+	uint8_t		sys_define1;
+	uint8_t		entry_status;
+	uint32_t	handle;
+	uint16_t	mb[28];
+};
+
+#define IOCB_SIZE 64
+
+/*
+ * Fibre channel port type.
+ */
+typedef enum {
+	FCT_UNKNOWN,
+	FCT_RSCN,
+	FCT_SWITCH,
+	FCT_BROADCAST,
+	FCT_INITIATOR,
+	FCT_TARGET,
+	FCT_NVME
+} fc_port_type_t;
+
+enum qla_sess_deletion {
+	QLA_SESS_DELETION_NONE		= 0,
+	QLA_SESS_DELETION_IN_PROGRESS,
+	QLA_SESS_DELETED,
+};
+
+enum qlt_plogi_link_t {
+	QLT_PLOGI_LINK_SAME_WWN,
+	QLT_PLOGI_LINK_CONFLICT,
+	QLT_PLOGI_LINK_MAX
+};
+
+struct qlt_plogi_ack_t {
+	struct list_head	list;
+	struct imm_ntfy_from_isp iocb;
+	port_id_t	id;
+	int		ref_count;
+	void		*fcport;
+};
+
+struct ct_sns_desc {
+	struct ct_sns_pkt	*ct_sns;
+	dma_addr_t		ct_sns_dma;
+};
+
+enum discovery_state {
+	DSC_DELETED,
+	DSC_GID_PN,
+	DSC_GNL,
+	DSC_LOGIN_PEND,
+	DSC_LOGIN_FAILED,
+	DSC_GPDB,
+	DSC_GPSC,
+	DSC_UPD_FCPORT,
+	DSC_LOGIN_COMPLETE,
+	DSC_DELETE_PEND,
+};
+
+enum login_state {	/* FW control Target side */
+	DSC_LS_LLIOCB_SENT = 2,
+	DSC_LS_PLOGI_PEND,
+	DSC_LS_PLOGI_COMP,
+	DSC_LS_PRLI_PEND,
+	DSC_LS_PRLI_COMP,
+	DSC_LS_PORT_UNAVAIL,
+	DSC_LS_PRLO_PEND = 9,
+	DSC_LS_LOGO_PEND,
+};
+
+enum fcport_mgt_event {
+	FCME_RELOGIN = 1,
+	FCME_RSCN,
+	FCME_GIDPN_DONE,
+	FCME_PLOGI_DONE,	/* Initiator side sent LLIOCB */
+	FCME_PRLI_DONE,
+	FCME_GNL_DONE,
+	FCME_GPSC_DONE,
+	FCME_GPDB_DONE,
+	FCME_GPNID_DONE,
+	FCME_GFFID_DONE,
+	FCME_DELETE_DONE,
+};
+
+enum rscn_addr_format {
+	RSCN_PORT_ADDR,
+	RSCN_AREA_ADDR,
+	RSCN_DOM_ADDR,
+	RSCN_FAB_ADDR,
+};
+
+/*
+ * Fibre channel port structure.
+ */
+typedef struct fc_port {
+	struct list_head list;
+	struct scsi_qla_host *vha;
+
+	uint8_t node_name[WWN_SIZE];
+	uint8_t port_name[WWN_SIZE];
+	port_id_t d_id;
+	uint16_t loop_id;
+	uint16_t old_loop_id;
+
+	unsigned int conf_compl_supported:1;
+	unsigned int deleted:2;
+	unsigned int local:1;
+	unsigned int logout_on_delete:1;
+	unsigned int logo_ack_needed:1;
+	unsigned int keep_nport_handle:1;
+	unsigned int send_els_logo:1;
+	unsigned int login_pause:1;
+	unsigned int login_succ:1;
+
+	struct work_struct nvme_del_work;
+	struct completion nvme_del_done;
+	uint32_t nvme_prli_service_param;
+#define NVME_PRLI_SP_CONF       BIT_7
+#define NVME_PRLI_SP_INITIATOR  BIT_5
+#define NVME_PRLI_SP_TARGET     BIT_4
+#define NVME_PRLI_SP_DISCOVERY  BIT_3
+	uint8_t nvme_flag;
+#define NVME_FLAG_REGISTERED 4
+
+	struct fc_port *conflict;
+	unsigned char logout_completed;
+	int generation;
+
+	struct se_session *se_sess;
+	struct kref sess_kref;
+	struct qla_tgt *tgt;
+	unsigned long expires;
+	struct list_head del_list_entry;
+	struct work_struct free_work;
+
+	struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
+
+	uint16_t tgt_id;
+	uint16_t old_tgt_id;
+
+	uint8_t fcp_prio;
+
+	uint8_t fabric_port_name[WWN_SIZE];
+	uint16_t fp_speed;
+
+	fc_port_type_t port_type;
+
+	atomic_t state;
+	uint32_t flags;
+
+	int login_retry;
+
+	struct fc_rport *rport, *drport;
+	u32 supported_classes;
+
+	uint8_t fc4_type;
+	uint8_t	fc4f_nvme;
+	uint8_t scan_state;
+
+	unsigned long last_queue_full;
+	unsigned long last_ramp_up;
+
+	uint16_t port_id;
+
+	struct nvme_fc_remote_port *nvme_remote_port;
+
+	unsigned long retry_delay_timestamp;
+	struct qla_tgt_sess *tgt_session;
+	struct ct_sns_desc ct_desc;
+	enum discovery_state disc_state;
+	enum login_state fw_login_state;
+	unsigned long plogi_nack_done_deadline;
+
+	u32 login_gen, last_login_gen;
+	u32 rscn_gen, last_rscn_gen;
+	u32 chip_reset;
+	struct list_head gnl_entry;
+	struct work_struct del_work;
+	u8 iocb[IOCB_SIZE];
+} fc_port_t;
+
+#define QLA_FCPORT_SCAN		1
+#define QLA_FCPORT_FOUND	2
+
+struct event_arg {
+	enum fcport_mgt_event	event;
+	fc_port_t		*fcport;
+	srb_t			*sp;
+	port_id_t		id;
+	u16			data[2], rc;
+	u8			port_name[WWN_SIZE];
+	u32			iop[2];
+};
+
+#include "qla_mr.h"
+
+/*
+ * Fibre channel port/lun states.
+ */
+#define FCS_UNCONFIGURED	1
+#define FCS_DEVICE_DEAD		2
+#define FCS_DEVICE_LOST		3
+#define FCS_ONLINE		4
+
+static const char * const port_state_str[] = {
+	"Unknown",
+	"UNCONFIGURED",
+	"DEAD",
+	"LOST",
+	"ONLINE"
+};
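+
+/*
+ * Illustrative lookup (sketch): port_state_str is indexed by the FCS_*
+ * values above, e.g.
+ *
+ *	pr_info("port state: %s\n",
+ *	    port_state_str[atomic_read(&fcport->state)]);
+ */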
+
+/*
+ * FC port flags.
+ */
+#define FCF_FABRIC_DEVICE	BIT_0
+#define FCF_LOGIN_NEEDED	BIT_1
+#define FCF_FCP2_DEVICE		BIT_2
+#define FCF_ASYNC_SENT		BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
+
+/* No loop ID flag. */
+#define FC_NO_LOOP_ID		0x1000
+
+/*
+ * FC-CT interface
+ *
+ * NOTE: All structures are big-endian in form.
+ */
+
+#define CT_REJECT_RESPONSE	0x8001
+#define CT_ACCEPT_RESPONSE	0x8002
+#define CT_REASON_INVALID_COMMAND_CODE		0x01
+#define CT_REASON_CANNOT_PERFORM		0x09
+#define CT_REASON_COMMAND_UNSUPPORTED		0x0b
+#define CT_EXPL_ALREADY_REGISTERED		0x10
+#define CT_EXPL_HBA_ATTR_NOT_REGISTERED		0x11
+#define CT_EXPL_MULTIPLE_HBA_ATTR		0x12
+#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH	0x13
+#define CT_EXPL_MISSING_REQ_HBA_ATTR		0x14
+#define CT_EXPL_PORT_NOT_REGISTERED_		0x15
+#define CT_EXPL_MISSING_HBA_ID_PORT_LIST	0x16
+#define CT_EXPL_HBA_NOT_REGISTERED		0x17
+#define CT_EXPL_PORT_ATTR_NOT_REGISTERED	0x20
+#define CT_EXPL_PORT_NOT_REGISTERED		0x21
+#define CT_EXPL_MULTIPLE_PORT_ATTR		0x22
+#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH	0x23
+
+#define NS_N_PORT_TYPE	0x01
+#define NS_NL_PORT_TYPE	0x02
+#define NS_NX_PORT_TYPE	0x7F
+
+#define	GA_NXT_CMD	0x100
+#define	GA_NXT_REQ_SIZE	(16 + 4)
+#define	GA_NXT_RSP_SIZE	(16 + 620)
+
+#define	GID_PT_CMD	0x1A1
+#define	GID_PT_REQ_SIZE	(16 + 4)
+
+#define	GPN_ID_CMD	0x112
+#define	GPN_ID_REQ_SIZE	(16 + 4)
+#define	GPN_ID_RSP_SIZE	(16 + 8)
+
+#define	GNN_ID_CMD	0x113
+#define	GNN_ID_REQ_SIZE	(16 + 4)
+#define	GNN_ID_RSP_SIZE	(16 + 8)
+
+#define	GFT_ID_CMD	0x117
+#define	GFT_ID_REQ_SIZE	(16 + 4)
+#define	GFT_ID_RSP_SIZE	(16 + 32)
+
+#define GID_PN_CMD 0x121
+#define GID_PN_REQ_SIZE (16 + 8)
+#define GID_PN_RSP_SIZE (16 + 4)
+
+#define	RFT_ID_CMD	0x217
+#define	RFT_ID_REQ_SIZE	(16 + 4 + 32)
+#define	RFT_ID_RSP_SIZE	16
+
+#define	RFF_ID_CMD	0x21F
+#define	RFF_ID_REQ_SIZE	(16 + 4 + 2 + 1 + 1)
+#define	RFF_ID_RSP_SIZE	16
+
+#define	RNN_ID_CMD	0x213
+#define	RNN_ID_REQ_SIZE	(16 + 4 + 8)
+#define	RNN_ID_RSP_SIZE	16
+
+#define	RSNN_NN_CMD	 0x239
+#define	RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
+#define	RSNN_NN_RSP_SIZE 16
+
+#define	GFPN_ID_CMD	0x11C
+#define	GFPN_ID_REQ_SIZE (16 + 4)
+#define	GFPN_ID_RSP_SIZE (16 + 8)
+
+#define	GPSC_CMD	0x127
+#define	GPSC_REQ_SIZE	(16 + 8)
+#define	GPSC_RSP_SIZE	(16 + 2 + 2)
+
+#define GFF_ID_CMD	0x011F
+#define GFF_ID_REQ_SIZE	(16 + 4)
+#define GFF_ID_RSP_SIZE (16 + 128)
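+
+/*
+ * Note: each REQ/RSP size above is the 16-byte CT preamble (struct
+ * ct_cmd_hdr plus the command/size/fragment fields defined below) plus
+ * the command-specific payload; e.g. GPN_ID returns 16 + 8 bytes, the
+ * preamble followed by one WWPN.
+ */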
+
+/*
+ * HBA attribute types.
+ */
+#define FDMI_HBA_ATTR_COUNT			9
+#define FDMIV2_HBA_ATTR_COUNT			17
+#define FDMI_HBA_NODE_NAME			0x1
+#define FDMI_HBA_MANUFACTURER			0x2
+#define FDMI_HBA_SERIAL_NUMBER			0x3
+#define FDMI_HBA_MODEL				0x4
+#define FDMI_HBA_MODEL_DESCRIPTION		0x5
+#define FDMI_HBA_HARDWARE_VERSION		0x6
+#define FDMI_HBA_DRIVER_VERSION			0x7
+#define FDMI_HBA_OPTION_ROM_VERSION		0x8
+#define FDMI_HBA_FIRMWARE_VERSION		0x9
+#define FDMI_HBA_OS_NAME_AND_VERSION		0xa
+#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH	0xb
+#define FDMI_HBA_NODE_SYMBOLIC_NAME		0xc
+#define FDMI_HBA_VENDOR_ID			0xd
+#define FDMI_HBA_NUM_PORTS			0xe
+#define FDMI_HBA_FABRIC_NAME			0xf
+#define FDMI_HBA_BOOT_BIOS_NAME			0x10
+#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER		0xe0
+
+struct ct_fdmi_hba_attr {
+	uint16_t type;
+	uint16_t len;
+	union {
+		uint8_t node_name[WWN_SIZE];
+		uint8_t manufacturer[64];
+		uint8_t serial_num[32];
+		uint8_t model[16+1];
+		uint8_t model_desc[80];
+		uint8_t hw_version[32];
+		uint8_t driver_version[32];
+		uint8_t orom_version[16];
+		uint8_t fw_version[32];
+		uint8_t os_version[128];
+		uint32_t max_ct_len;
+	} a;
+};
+
+struct ct_fdmi_hba_attributes {
+	uint32_t count;
+	struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
+};
+
+struct ct_fdmiv2_hba_attr {
+	uint16_t type;
+	uint16_t len;
+	union {
+		uint8_t node_name[WWN_SIZE];
+		uint8_t manufacturer[64];
+		uint8_t serial_num[32];
+		uint8_t model[16+1];
+		uint8_t model_desc[80];
+		uint8_t hw_version[16];
+		uint8_t driver_version[32];
+		uint8_t orom_version[16];
+		uint8_t fw_version[32];
+		uint8_t os_version[128];
+		uint32_t max_ct_len;
+		uint8_t sym_name[256];
+		uint32_t vendor_id;
+		uint32_t num_ports;
+		uint8_t fabric_name[WWN_SIZE];
+		uint8_t bios_name[32];
+		uint8_t vendor_identifier[8];
+	} a;
+};
+
+struct ct_fdmiv2_hba_attributes {
+	uint32_t count;
+	struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+};
+
+/*
+ * Port attribute types.
+ */
+#define FDMI_PORT_ATTR_COUNT		6
+#define FDMIV2_PORT_ATTR_COUNT		16
+#define FDMI_PORT_FC4_TYPES		0x1
+#define FDMI_PORT_SUPPORT_SPEED		0x2
+#define FDMI_PORT_CURRENT_SPEED		0x3
+#define FDMI_PORT_MAX_FRAME_SIZE	0x4
+#define FDMI_PORT_OS_DEVICE_NAME	0x5
+#define FDMI_PORT_HOST_NAME		0x6
+#define FDMI_PORT_NODE_NAME		0x7
+#define FDMI_PORT_NAME			0x8
+#define FDMI_PORT_SYM_NAME		0x9
+#define FDMI_PORT_TYPE			0xa
+#define FDMI_PORT_SUPP_COS		0xb
+#define FDMI_PORT_FABRIC_NAME		0xc
+#define FDMI_PORT_FC4_TYPE		0xd
+#define FDMI_PORT_STATE			0x101
+#define FDMI_PORT_COUNT			0x102
+#define FDMI_PORT_ID			0x103
+
+#define FDMI_PORT_SPEED_1GB		0x1
+#define FDMI_PORT_SPEED_2GB		0x2
+#define FDMI_PORT_SPEED_10GB		0x4
+#define FDMI_PORT_SPEED_4GB		0x8
+#define FDMI_PORT_SPEED_8GB		0x10
+#define FDMI_PORT_SPEED_16GB		0x20
+#define FDMI_PORT_SPEED_32GB		0x40
+#define FDMI_PORT_SPEED_UNKNOWN		0x8000
+
+#define FC_CLASS_2	0x04
+#define FC_CLASS_3	0x08
+#define FC_CLASS_2_3	0x0C
+
+struct ct_fdmiv2_port_attr {
+	uint16_t type;
+	uint16_t len;
+	union {
+		uint8_t fc4_types[32];
+		uint32_t sup_speed;
+		uint32_t cur_speed;
+		uint32_t max_frame_size;
+		uint8_t os_dev_name[32];
+		uint8_t host_name[256];
+		uint8_t node_name[WWN_SIZE];
+		uint8_t port_name[WWN_SIZE];
+		uint8_t port_sym_name[128];
+		uint32_t port_type;
+		uint32_t port_supported_cos;
+		uint8_t fabric_name[WWN_SIZE];
+		uint8_t port_fc4_type[32];
+		uint32_t port_state;
+		uint32_t num_ports;
+		uint32_t port_id;
+	} a;
+};
+
+/*
+ * Port Attribute Block.
+ */
+struct ct_fdmiv2_port_attributes {
+	uint32_t count;
+	struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
+};
+
+struct ct_fdmi_port_attr {
+	uint16_t type;
+	uint16_t len;
+	union {
+		uint8_t fc4_types[32];
+		uint32_t sup_speed;
+		uint32_t cur_speed;
+		uint32_t max_frame_size;
+		uint8_t os_dev_name[32];
+		uint8_t host_name[256];
+	} a;
+};
+
+struct ct_fdmi_port_attributes {
+	uint32_t count;
+	struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
+};
+
+/* FDMI definitions. */
+#define GRHL_CMD	0x100
+#define GHAT_CMD	0x101
+#define GRPL_CMD	0x102
+#define GPAT_CMD	0x110
+
+#define RHBA_CMD	0x200
+#define RHBA_RSP_SIZE	16
+
+#define RHAT_CMD	0x201
+#define RPRT_CMD	0x210
+
+#define RPA_CMD		0x211
+#define RPA_RSP_SIZE	16
+
+#define DHBA_CMD	0x300
+#define DHBA_REQ_SIZE	(16 + 8)
+#define DHBA_RSP_SIZE	16
+
+#define DHAT_CMD	0x301
+#define DPRT_CMD	0x310
+#define DPA_CMD		0x311
+
+/* CT command header -- request/response common fields */
+struct ct_cmd_hdr {
+	uint8_t revision;
+	uint8_t in_id[3];
+	uint8_t gs_type;
+	uint8_t gs_subtype;
+	uint8_t options;
+	uint8_t reserved;
+};
+
+/* CT command request */
+struct ct_sns_req {
+	struct ct_cmd_hdr header;
+	uint16_t command;
+	uint16_t max_rsp_size;
+	uint8_t fragment_id;
+	uint8_t reserved[3];
+
+	union {
+		/* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */
+		struct {
+			uint8_t reserved;
+			uint8_t port_id[3];
+		} port_id;
+
+		struct {
+			uint8_t port_type;
+			uint8_t domain;
+			uint8_t area;
+			uint8_t reserved;
+		} gid_pt;
+
+		struct {
+			uint8_t reserved;
+			uint8_t port_id[3];
+			uint8_t fc4_types[32];
+		} rft_id;
+
+		struct {
+			uint8_t reserved;
+			uint8_t port_id[3];
+			uint16_t reserved2;
+			uint8_t fc4_feature;
+			uint8_t fc4_type;
+		} rff_id;
+
+		struct {
+			uint8_t reserved;
+			uint8_t port_id[3];
+			uint8_t node_name[8];
+		} rnn_id;
+
+		struct {
+			uint8_t node_name[8];
+			uint8_t name_len;
+			uint8_t sym_node_name[255];
+		} rsnn_nn;
+
+		struct {
+			uint8_t hba_identifier[8];
+		} ghat;
+
+		struct {
+			uint8_t hba_identifier[8];
+			uint32_t entry_count;
+			uint8_t port_name[8];
+			struct ct_fdmi_hba_attributes attrs;
+		} rhba;
+
+		struct {
+			uint8_t hba_identifier[8];
+			uint32_t entry_count;
+			uint8_t port_name[8];
+			struct ct_fdmiv2_hba_attributes attrs;
+		} rhba2;
+
+		struct {
+			uint8_t hba_identifier[8];
+			struct ct_fdmi_hba_attributes attrs;
+		} rhat;
+
+		struct {
+			uint8_t port_name[8];
+			struct ct_fdmi_port_attributes attrs;
+		} rpa;
+
+		struct {
+			uint8_t port_name[8];
+			struct ct_fdmiv2_port_attributes attrs;
+		} rpa2;
+
+		struct {
+			uint8_t port_name[8];
+		} dhba;
+
+		struct {
+			uint8_t port_name[8];
+		} dhat;
+
+		struct {
+			uint8_t port_name[8];
+		} dprt;
+
+		struct {
+			uint8_t port_name[8];
+		} dpa;
+
+		struct {
+			uint8_t port_name[8];
+		} gpsc;
+
+		struct {
+			uint8_t reserved;
+			uint8_t port_id[3];
+		} gff_id;
+
+		struct {
+			uint8_t port_name[8];
+		} gid_pn;
+	} req;
+};
+
+/* CT command response header */
+struct ct_rsp_hdr {
+	struct ct_cmd_hdr header;
+	uint16_t response;
+	uint16_t residual;
+	uint8_t fragment_id;
+	uint8_t reason_code;
+	uint8_t explanation_code;
+	uint8_t vendor_unique;
+};
+
+struct ct_sns_gid_pt_data {
+	uint8_t control_byte;
+	uint8_t port_id[3];
+};
+
+struct ct_sns_rsp {
+	struct ct_rsp_hdr header;
+
+	union {
+		struct {
+			uint8_t port_type;
+			uint8_t port_id[3];
+			uint8_t port_name[8];
+			uint8_t sym_port_name_len;
+			uint8_t sym_port_name[255];
+			uint8_t node_name[8];
+			uint8_t sym_node_name_len;
+			uint8_t sym_node_name[255];
+			uint8_t init_proc_assoc[8];
+			uint8_t node_ip_addr[16];
+			uint8_t class_of_service[4];
+			uint8_t fc4_types[32];
+			uint8_t ip_address[16];
+			uint8_t fabric_port_name[8];
+			uint8_t reserved;
+			uint8_t hard_address[3];
+		} ga_nxt;
+
+		struct {
+			/* Assume the largest number of targets for the union */
+			struct ct_sns_gid_pt_data
+			    entries[MAX_FIBRE_DEVICES_MAX];
+		} gid_pt;
+
+		struct {
+			uint8_t port_name[8];
+		} gpn_id;
+
+		struct {
+			uint8_t node_name[8];
+		} gnn_id;
+
+		struct {
+			uint8_t fc4_types[32];
+		} gft_id;
+
+		struct {
+			uint32_t entry_count;
+			uint8_t port_name[8];
+			struct ct_fdmi_hba_attributes attrs;
+		} ghat;
+
+		struct {
+			uint8_t port_name[8];
+		} gfpn_id;
+
+		struct {
+			uint16_t speeds;
+			uint16_t speed;
+		} gpsc;
+
+#define GFF_FCP_SCSI_OFFSET	7
+#define GFF_NVME_OFFSET		23 /* type = 28h */
+		struct {
+			uint8_t fc4_features[128];
+		} gff_id;
+		struct {
+			uint8_t reserved;
+			uint8_t port_id[3];
+		} gid_pn;
+	} rsp;
+};
+
+struct ct_sns_pkt {
+	union {
+		struct ct_sns_req req;
+		struct ct_sns_rsp rsp;
+	} p;
+};
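+
+/*
+ * Illustrative use of the GFF_ID offsets above (sketch): the FC-4
+ * Features byte for FCP-SCSI (type 08h) sits at GFF_FCP_SCSI_OFFSET in
+ * the returned feature map, so a fabric scan can test its feature bits:
+ *
+ *	if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & BIT_0)
+ *		... port advertises FCP features ...
+ */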
+
+/*
+ * SNS command structures -- for 2200 compatibility.
+ */
+#define	RFT_ID_SNS_SCMD_LEN	22
+#define	RFT_ID_SNS_CMD_SIZE	60
+#define	RFT_ID_SNS_DATA_SIZE	16
+
+#define	RNN_ID_SNS_SCMD_LEN	10
+#define	RNN_ID_SNS_CMD_SIZE	36
+#define	RNN_ID_SNS_DATA_SIZE	16
+
+#define	GA_NXT_SNS_SCMD_LEN	6
+#define	GA_NXT_SNS_CMD_SIZE	28
+#define	GA_NXT_SNS_DATA_SIZE	(620 + 16)
+
+#define	GID_PT_SNS_SCMD_LEN	6
+#define	GID_PT_SNS_CMD_SIZE	28
+/*
+ * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older
+ * adapters.
+ */
+#define	GID_PT_SNS_DATA_SIZE	(MAX_FIBRE_DEVICES_2100 * 4 + 16)
+
+#define	GPN_ID_SNS_SCMD_LEN	6
+#define	GPN_ID_SNS_CMD_SIZE	28
+#define	GPN_ID_SNS_DATA_SIZE	(8 + 16)
+
+#define	GNN_ID_SNS_SCMD_LEN	6
+#define	GNN_ID_SNS_CMD_SIZE	28
+#define	GNN_ID_SNS_DATA_SIZE	(8 + 16)
+
+struct sns_cmd_pkt {
+	union {
+		struct {
+			uint16_t buffer_length;
+			uint16_t reserved_1;
+			uint32_t buffer_address[2];
+			uint16_t subcommand_length;
+			uint16_t reserved_2;
+			uint16_t subcommand;
+			uint16_t size;
+			uint32_t reserved_3;
+			uint8_t param[36];
+		} cmd;
+
+		uint8_t rft_data[RFT_ID_SNS_DATA_SIZE];
+		uint8_t rnn_data[RNN_ID_SNS_DATA_SIZE];
+		uint8_t gan_data[GA_NXT_SNS_DATA_SIZE];
+		uint8_t gid_data[GID_PT_SNS_DATA_SIZE];
+		uint8_t gpn_data[GPN_ID_SNS_DATA_SIZE];
+		uint8_t gnn_data[GNN_ID_SNS_DATA_SIZE];
+	} p;
+};
+
+struct fw_blob {
+	char *name;
+	uint32_t segs[4];
+	const struct firmware *fw;
+};
+
+/* Return data from MBC_GET_ID_LIST call. */
+struct gid_list_info {
+	uint8_t	al_pa;
+	uint8_t	area;
+	uint8_t	domain;
+	uint8_t	loop_id_2100;	/* ISP2100/ISP2200 -- 4 bytes. */
+	uint16_t loop_id;	/* ISP23XX         -- 6 bytes. */
+	uint16_t reserved_1;	/* ISP24XX         -- 8 bytes. */
+};
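+
+/*
+ * Note: the live entry stride depends on the ISP generation (4, 6, or 8
+ * bytes, per the comments above), so callers walk the returned buffer by
+ * ha->gid_list_info_size rather than sizeof(struct gid_list_info).  A
+ * minimal sketch of the walk (mirroring the loop in qla_dfs.c below),
+ * given 'entries' reported by MBC_GET_ID_LIST:
+ *
+ *	char *iter = (char *)ha->gid_list;
+ *	for (i = 0; i < entries; i++, iter += ha->gid_list_info_size) {
+ *		struct gid_list_info *gid = (struct gid_list_info *)iter;
+ *		... use gid->loop_id, gid->al_pa, etc. ...
+ *	}
+ */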
+
+/* NPIV */
+typedef struct vport_info {
+	uint8_t		port_name[WWN_SIZE];
+	uint8_t		node_name[WWN_SIZE];
+	int		vp_id;
+	uint16_t	loop_id;
+	unsigned long	host_no;
+	uint8_t		port_id[3];
+	int		loop_state;
+} vport_info_t;
+
+typedef struct vport_params {
+	uint8_t 	port_name[WWN_SIZE];
+	uint8_t 	node_name[WWN_SIZE];
+	uint32_t 	options;
+#define	VP_OPTS_RETRY_ENABLE	BIT_0
+#define	VP_OPTS_VP_DISABLE	BIT_1
+} vport_params_t;
+
+/* NPIV - return codes of VP create and modify */
+#define VP_RET_CODE_OK			0
+#define VP_RET_CODE_FATAL		1
+#define VP_RET_CODE_WRONG_ID		2
+#define VP_RET_CODE_WWPN		3
+#define VP_RET_CODE_RESOURCES		4
+#define VP_RET_CODE_NO_MEM		5
+#define VP_RET_CODE_NOT_FOUND		6
+
+struct qla_hw_data;
+struct rsp_que;
+/*
+ * ISP operations
+ */
+struct isp_operations {
+
+	int (*pci_config) (struct scsi_qla_host *);
+	void (*reset_chip) (struct scsi_qla_host *);
+	int (*chip_diag) (struct scsi_qla_host *);
+	void (*config_rings) (struct scsi_qla_host *);
+	void (*reset_adapter) (struct scsi_qla_host *);
+	int (*nvram_config) (struct scsi_qla_host *);
+	void (*update_fw_options) (struct scsi_qla_host *);
+	int (*load_risc) (struct scsi_qla_host *, uint32_t *);
+
+	char * (*pci_info_str) (struct scsi_qla_host *, char *);
+	char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
+
+	irq_handler_t intr_handler;
+	void (*enable_intrs) (struct qla_hw_data *);
+	void (*disable_intrs) (struct qla_hw_data *);
+
+	int (*abort_command) (srb_t *);
+	int (*target_reset) (struct fc_port *, uint64_t, int);
+	int (*lun_reset) (struct fc_port *, uint64_t, int);
+	int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
+		uint8_t, uint8_t, uint16_t *, uint8_t);
+	int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
+	    uint8_t, uint8_t);
+
+	uint16_t (*calc_req_entries) (uint16_t);
+	void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
+	void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *);
+	void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
+	    uint32_t);
+
+	uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *,
+		uint32_t, uint32_t);
+	int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
+		uint32_t);
+
+	void (*fw_dump) (struct scsi_qla_host *, int);
+
+	int (*beacon_on) (struct scsi_qla_host *);
+	int (*beacon_off) (struct scsi_qla_host *);
+	void (*beacon_blink) (struct scsi_qla_host *);
+
+	uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *,
+		uint32_t, uint32_t);
+	int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
+		uint32_t);
+
+	int (*get_flash_version) (struct scsi_qla_host *, void *);
+	int (*start_scsi) (srb_t *);
+	int (*start_scsi_mq) (srb_t *);
+	int (*abort_isp) (struct scsi_qla_host *);
+	int (*iospace_config)(struct qla_hw_data*);
+	int (*initialize_adapter)(struct scsi_qla_host *);
+};
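+
+/*
+ * Usage sketch: the per-chip ops table hangs off qla_hw_data, so generic
+ * code can stay ISP-agnostic, e.g.
+ *
+ *	struct qla_hw_data *ha = vha->hw;
+ *
+ *	ha->isp_ops->reset_chip(vha);
+ *	if (ha->isp_ops->nvram_config(vha) != QLA_SUCCESS)
+ *		... handle error ...
+ */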
+
+/* MSI-X Support *************************************************************/
+
+#define QLA_MSIX_CHIP_REV_24XX	3
+#define QLA_MSIX_FW_MODE(m)	(((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
+#define QLA_MSIX_FW_MODE_1(m)	(QLA_MSIX_FW_MODE(m) == 1)
+
+#define QLA_BASE_VECTORS	2 /* default + RSP */
+#define QLA_MSIX_RSP_Q			0x01
+#define QLA_ATIO_VECTOR		0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q	0x03
+
+#define QLA_MIDX_DEFAULT	0
+#define QLA_MIDX_RSP_Q		1
+#define QLA_PCI_MSIX_CONTROL	0xa2
+#define QLA_83XX_PCI_MSIX_CONTROL	0x92
+
+struct scsi_qla_host;
+
+#define QLA83XX_RSPQ_MSIX_ENTRY_NUMBER 1 /* refer to qla83xx_msix_entries */
+
+struct qla_msix_entry {
+	int have_irq;
+	int in_use;
+	uint32_t vector;
+	uint16_t entry;
+	char name[30];
+	void *handle;
+	int cpuid;
+};
+
+#define	WATCH_INTERVAL		1       /* number of seconds */
+
+/* Work events.  */
+enum qla_work_type {
+	QLA_EVT_AEN,
+	QLA_EVT_IDC_ACK,
+	QLA_EVT_ASYNC_LOGIN,
+	QLA_EVT_ASYNC_LOGOUT,
+	QLA_EVT_ASYNC_LOGOUT_DONE,
+	QLA_EVT_ASYNC_ADISC,
+	QLA_EVT_ASYNC_ADISC_DONE,
+	QLA_EVT_UEVENT,
+	QLA_EVT_AENFX,
+	QLA_EVT_GIDPN,
+	QLA_EVT_GPNID,
+	QLA_EVT_GPNID_DONE,
+	QLA_EVT_NEW_SESS,
+	QLA_EVT_GPDB,
+	QLA_EVT_PRLI,
+	QLA_EVT_GPSC,
+	QLA_EVT_UPD_FCPORT,
+	QLA_EVT_GNL,
+	QLA_EVT_NACK,
+};
+
+struct qla_work_evt {
+	struct list_head	list;
+	enum qla_work_type	type;
+	u32			flags;
+#define QLA_EVT_FLAG_FREE	0x1
+
+	union {
+		struct {
+			enum fc_host_event_code code;
+			u32 data;
+		} aen;
+		struct {
+#define QLA_IDC_ACK_REGS	7
+			uint16_t mb[QLA_IDC_ACK_REGS];
+		} idc_ack;
+		struct {
+			struct fc_port *fcport;
+#define QLA_LOGIO_LOGIN_RETRIED	BIT_0
+			u16 data[2];
+		} logio;
+		struct {
+			u32 code;
+#define QLA_UEVENT_CODE_FW_DUMP	0
+		} uevent;
+		struct {
+			uint32_t        evtcode;
+			uint32_t        mbx[8];
+			uint32_t        count;
+		} aenfx;
+		struct {
+			srb_t *sp;
+		} iosb;
+		struct {
+			port_id_t id;
+		} gpnid;
+		struct {
+			port_id_t id;
+			u8 port_name[8];
+			void *pla;
+		} new_sess;
+		struct { /* Get PDB, Get Speed, update fcport, gnl, gidpn */
+			fc_port_t *fcport;
+			u8 opt;
+		} fcport;
+		struct {
+			fc_port_t *fcport;
+			u8 iocb[IOCB_SIZE];
+			int type;
+		} nack;
+	} u;
+};
+
+struct qla_chip_state_84xx {
+	struct list_head list;
+	struct kref kref;
+
+	void *bus;
+	spinlock_t access_lock;
+	struct mutex fw_update_mutex;
+	uint32_t fw_update;
+	uint32_t op_fw_version;
+	uint32_t op_fw_size;
+	uint32_t op_fw_seq_size;
+	uint32_t diag_fw_version;
+	uint32_t gold_fw_version;
+};
+
+struct qla_dif_statistics {
+	uint64_t dif_input_bytes;
+	uint64_t dif_output_bytes;
+	uint64_t dif_input_requests;
+	uint64_t dif_output_requests;
+	uint32_t dif_guard_err;
+	uint32_t dif_ref_tag_err;
+	uint32_t dif_app_tag_err;
+};
+
+struct qla_statistics {
+	uint32_t total_isp_aborts;
+	uint64_t input_bytes;
+	uint64_t output_bytes;
+	uint64_t input_requests;
+	uint64_t output_requests;
+	uint32_t control_requests;
+
+	uint64_t jiffies_at_last_reset;
+	uint32_t stat_max_pend_cmds;
+	uint32_t stat_max_qfull_cmds_alloc;
+	uint32_t stat_max_qfull_cmds_dropped;
+
+	struct qla_dif_statistics qla_dif_stats;
+};
+
+struct bidi_statistics {
+	unsigned long long io_count;
+	unsigned long long transfer_bytes;
+};
+
+struct qla_tc_param {
+	struct scsi_qla_host *vha;
+	uint32_t blk_sz;
+	uint32_t bufflen;
+	struct scatterlist *sg;
+	struct scatterlist *prot_sg;
+	struct crc_context *ctx;
+	uint8_t *ctx_dsd_alloced;
+};
+
+/* Multi queue support */
+#define MBC_INITIALIZE_MULTIQ 0x1f
+#define QLA_QUE_PAGE 0X1000
+#define QLA_MQ_SIZE 32
+#define QLA_MAX_QUEUES 256
+#define ISP_QUE_REG(ha, id) \
+	((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
+	 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
+	 ((void __iomem *)ha->iobase))
+#define QLA_REQ_QUE_ID(tag) \
+	((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
+#define QLA_DEFAULT_QUE_QOS 5
+#define QLA_PRECONFIG_VPORTS 32
+#define QLA_MAX_VPORTS_QLA24XX	128
+#define QLA_MAX_VPORTS_QLA25XX	256
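+
+/*
+ * Worked example for ISP_QUE_REG(): on multiqueue-capable parts each queue
+ * owns a QLA_QUE_PAGE (4K) register window, so queue id 3 resolves to
+ * mqiobase + 3 * 0x1000; parts without MQ always fall back to the base
+ * iobase.
+ */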
+
+struct qla_tgt_counters {
+	uint64_t qla_core_sbt_cmd;
+	uint64_t core_qla_que_buf;
+	uint64_t qla_core_ret_ctio;
+	uint64_t core_qla_snd_status;
+	uint64_t qla_core_ret_sta_ctio;
+	uint64_t core_qla_free_cmd;
+	uint64_t num_q_full_sent;
+	uint64_t num_alloc_iocb_failed;
+	uint64_t num_term_xchg_sent;
+};
+
+struct qla_qpair;
+
+/* Response queue data structure */
+struct rsp_que {
+	dma_addr_t  dma;
+	response_t *ring;
+	response_t *ring_ptr;
+	uint32_t __iomem *rsp_q_in;	/* FWI2-capable only. */
+	uint32_t __iomem *rsp_q_out;
+	uint16_t  ring_index;
+	uint16_t  out_ptr;
+	uint16_t  *in_ptr;		/* queue shadow in index */
+	uint16_t  length;
+	uint16_t  options;
+	uint16_t  rid;
+	uint16_t  id;
+	uint16_t  vp_idx;
+	struct qla_hw_data *hw;
+	struct qla_msix_entry *msix;
+	struct req_que *req;
+	srb_t *status_srb; /* status continuation entry */
+	struct qla_qpair *qpair;
+
+	dma_addr_t  dma_fx00;
+	response_t *ring_fx00;
+	uint16_t  length_fx00;
+	uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
+};
+
+/* Request queue data structure */
+struct req_que {
+	dma_addr_t  dma;
+	request_t *ring;
+	request_t *ring_ptr;
+	uint32_t __iomem *req_q_in;	/* FWI2-capable only. */
+	uint32_t __iomem *req_q_out;
+	uint16_t  ring_index;
+	uint16_t  in_ptr;
+	uint16_t  *out_ptr;		/* queue shadow out index */
+	uint16_t  cnt;
+	uint16_t  length;
+	uint16_t  options;
+	uint16_t  rid;
+	uint16_t  id;
+	uint16_t  qos;
+	uint16_t  vp_idx;
+	struct rsp_que *rsp;
+	srb_t **outstanding_cmds;
+	uint32_t current_outstanding_cmd;
+	uint16_t num_outstanding_cmds;
+	int max_q_depth;
+
+	dma_addr_t  dma_fx00;
+	request_t *ring_fx00;
+	uint16_t  length_fx00;
+	uint8_t req_pkt[REQUEST_ENTRY_SIZE];
+};
+
+/* Queue pair data structure */
+struct qla_qpair {
+	spinlock_t qp_lock;
+	atomic_t ref_count;
+	uint32_t lun_cnt;
+	/*
+	 * For qpair 0, qp_lock_ptr points at hardware_lock due to
+	 * legacy code. For all other qpairs, it points at qp_lock.
+	 */
+	spinlock_t *qp_lock_ptr;
+	struct scsi_qla_host *vha;
+	u32 chip_reset;
+
+	/* distill these fields down to 'online=0/1'
+	 * ha->flags.eeh_busy
+	 * ha->flags.pci_channel_io_perm_failure
+	 * base_vha->loop_state
+	 */
+	uint32_t online:1;
+	/* move vha->flags.difdix_supported here */
+	uint32_t difdix_supported:1;
+	uint32_t delete_in_progress:1;
+	uint32_t fw_started:1;
+	uint32_t enable_class_2:1;
+	uint32_t enable_explicit_conf:1;
+	uint32_t use_shadow_reg:1;
+
+	uint16_t id;			/* qp number used with FW */
+	uint16_t vp_idx;		/* vport ID */
+	mempool_t *srb_mempool;
+
+	struct pci_dev  *pdev;
+	void (*reqq_start_iocbs)(struct qla_qpair *);
+
+	/* TODO (new driver): move the queues here instead of pointers */
+	struct req_que *req;
+	struct rsp_que *rsp;
+	struct atio_que *atio;
+	struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+	struct qla_hw_data *hw;
+	struct work_struct q_work;
+	struct list_head qp_list_elem; /* vha->qp_list */
+	struct list_head hints_list;
+	struct list_head nvme_done_list;
+	uint16_t cpuid;
+	struct qla_tgt_counters tgt_counters;
+};
+
+/* Place holder for FW buffer parameters */
+struct qlfc_fw {
+	void *fw_buf;
+	dma_addr_t fw_dma;
+	uint32_t len;
+};
+
+struct scsi_qlt_host {
+	void *target_lport_ptr;
+	struct mutex tgt_mutex;
+	struct mutex tgt_host_action_mutex;
+	struct qla_tgt *qla_tgt;
+};
+
+struct qlt_hw_data {
+	/* Protected by hw lock */
+	uint32_t node_name_set:1;
+
+	dma_addr_t atio_dma;	/* Physical address. */
+	struct atio *atio_ring;	/* Base virtual address */
+	struct atio *atio_ring_ptr;	/* Current address. */
+	uint16_t atio_ring_index; /* Current index. */
+	uint16_t atio_q_length;
+	uint32_t __iomem *atio_q_in;
+	uint32_t __iomem *atio_q_out;
+
+	struct qla_tgt_func_tmpl *tgt_ops;
+	struct qla_tgt_vp_map *tgt_vp_map;
+
+	int saved_set;
+	uint16_t saved_exchange_count;
+	uint32_t saved_firmware_options_1;
+	uint32_t saved_firmware_options_2;
+	uint32_t saved_firmware_options_3;
+	uint8_t saved_firmware_options[2];
+	uint8_t saved_add_firmware_options[2];
+
+	uint8_t tgt_node_name[WWN_SIZE];
+
+	struct dentry *dfs_tgt_sess;
+	struct dentry *dfs_tgt_port_database;
+	struct dentry *dfs_naqp;
+
+	struct list_head q_full_list;
+	uint32_t num_pend_cmds;
+	uint32_t num_qfull_cmds_alloc;
+	uint32_t num_qfull_cmds_dropped;
+	spinlock_t q_full_lock;
+	uint32_t leak_exchg_thresh_hold;
+	spinlock_t sess_lock;
+	int num_act_qpairs;
+#define DEFAULT_NAQP 2
+	spinlock_t atio_lock ____cacheline_aligned;
+	struct btree_head32 host_map;
+};
+
+#define MAX_QFULL_CMDS_ALLOC	8192
+#define Q_FULL_THRESH_HOLD_PERCENT 90
+#define Q_FULL_THRESH_HOLD(ha) \
+	((ha->cur_fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
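+
+/*
+ * Worked example: with cur_fw_xcb_count == 2048 the queue-full threshold
+ * is (2048 / 100) * 90 == 1800 outstanding exchanges (integer division,
+ * so the result lands slightly below a true 90%).
+ */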
+
+#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75	/* 75 percent */
+
+#define QLA_EARLY_LINKUP(_ha) \
+	((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+	 _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
+/*
+ * QLogic host adapter specific data structure.
+ */
+struct qla_hw_data {
+	struct pci_dev  *pdev;
+	/* SRB cache. */
+#define SRB_MIN_REQ     128
+	mempool_t       *srb_mempool;
+
+	volatile struct {
+		uint32_t	mbox_int		:1;
+		uint32_t	mbox_busy		:1;
+		uint32_t	disable_risc_code_load	:1;
+		uint32_t	enable_64bit_addressing	:1;
+		uint32_t	enable_lip_reset	:1;
+		uint32_t	enable_target_reset	:1;
+		uint32_t	enable_lip_full_login	:1;
+		uint32_t	enable_led_scheme	:1;
+
+		uint32_t	msi_enabled		:1;
+		uint32_t	msix_enabled		:1;
+		uint32_t	disable_serdes		:1;
+		uint32_t	gpsc_supported		:1;
+		uint32_t	npiv_supported		:1;
+		uint32_t	pci_channel_io_perm_failure	:1;
+		uint32_t	fce_enabled		:1;
+		uint32_t	fac_supported		:1;
+
+		uint32_t	chip_reset_done		:1;
+		uint32_t	running_gold_fw		:1;
+		uint32_t	eeh_busy		:1;
+		uint32_t	disable_msix_handshake	:1;
+		uint32_t	fcp_prio_enabled	:1;
+		uint32_t	isp82xx_fw_hung:1;
+		uint32_t	nic_core_hung:1;
+
+		uint32_t	quiesce_owner:1;
+		uint32_t	nic_core_reset_hdlr_active:1;
+		uint32_t	nic_core_reset_owner:1;
+		uint32_t	isp82xx_no_md_cap:1;
+		uint32_t	host_shutting_down:1;
+		uint32_t	idc_compl_status:1;
+		uint32_t        mr_reset_hdlr_active:1;
+		uint32_t        mr_intr_valid:1;
+
+		uint32_t        dport_enabled:1;
+		uint32_t	fawwpn_enabled:1;
+		uint32_t	exlogins_enabled:1;
+		uint32_t	exchoffld_enabled:1;
+
+		uint32_t	lip_ae:1;
+		uint32_t	n2n_ae:1;
+		uint32_t	fw_started:1;
+		uint32_t	fw_init_done:1;
+
+		uint32_t	detected_lr_sfp:1;
+		uint32_t	using_lr_setting:1;
+	} flags;
+
+	uint16_t long_range_distance;	/* 32G & above */
+#define LR_DISTANCE_5K  1
+#define LR_DISTANCE_10K 0
+
+	/*
+	 * This spinlock is used to protect "I/O transactions"; you must
+	 * acquire it before doing any I/O to the card, e.g. with RD_REG*()
+	 * and WRT_REG*(), and hold it for the duration of your entire
+	 * command transaction.
+	 *
+	 * This spinlock is of lower priority than the I/O request lock.
+	 */
+
+	spinlock_t	hardware_lock ____cacheline_aligned;
+	int		bars;
+	int		mem_only;
+	device_reg_t *iobase;           /* Base I/O address */
+	resource_size_t pio_address;
+
+#define MIN_IOBASE_LEN          0x100
+	dma_addr_t		bar0_hdl;
+
+	void __iomem *cregbase;
+	dma_addr_t		bar2_hdl;
+#define BAR0_LEN_FX00			(1024 * 1024)
+#define BAR2_LEN_FX00			(128 * 1024)
+
+	uint32_t		rqstq_intr_code;
+	uint32_t		mbx_intr_code;
+	uint32_t		req_que_len;
+	uint32_t		rsp_que_len;
+	uint32_t		req_que_off;
+	uint32_t		rsp_que_off;
+
+	/* Multi queue data structs */
+	device_reg_t *mqiobase;
+	device_reg_t *msixbase;
+	uint16_t        msix_count;
+	uint8_t         mqenable;
+	struct req_que **req_q_map;
+	struct rsp_que **rsp_q_map;
+	struct qla_qpair **queue_pair_map;
+	unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+	unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+	unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+		/ sizeof(unsigned long)];
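+
+	/*
+	 * Sizing note: QLA_MAX_QUEUES / 8 is the bitmap length in bytes
+	 * (256 bits == 32 bytes), so each map above spans
+	 * 32 / sizeof(unsigned long) elements, i.e. 4 longs on 64-bit
+	 * builds.
+	 */
+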
+	uint8_t 	max_req_queues;
+	uint8_t 	max_rsp_queues;
+	uint8_t		max_qpairs;
+	uint8_t		num_qpairs;
+	struct qla_qpair *base_qpair;
+	struct qla_npiv_entry *npiv_info;
+	uint16_t	nvram_npiv_size;
+
+	uint16_t        switch_cap;
+#define FLOGI_SEQ_DEL           BIT_8
+#define FLOGI_MID_SUPPORT       BIT_10
+#define FLOGI_VSAN_SUPPORT      BIT_12
+#define FLOGI_SP_SUPPORT        BIT_13
+
+	uint8_t		port_no;		/* Physical port of adapter */
+	uint8_t		exch_starvation;
+
+	/* Timeout timers. */
+	uint8_t 	loop_down_abort_time;    /* port down timer */
+	atomic_t	loop_down_timer;         /* loop down timer */
+	uint8_t		link_down_timeout;       /* link down timeout */
+	uint16_t	max_loop_id;
+	uint16_t	max_fibre_devices;	/* Maximum number of targets */
+
+	uint16_t	fb_rev;
+	uint16_t	min_external_loopid;    /* First external loop Id */
+
+#define PORT_SPEED_UNKNOWN 0xFFFF
+#define PORT_SPEED_1GB  0x00
+#define PORT_SPEED_2GB  0x01
+#define PORT_SPEED_4GB  0x03
+#define PORT_SPEED_8GB  0x04
+#define PORT_SPEED_16GB 0x05
+#define PORT_SPEED_32GB 0x06
+#define PORT_SPEED_10GB	0x13
+	uint16_t	link_data_rate;         /* F/W operating speed */
+
+	uint8_t		current_topology;
+	uint8_t		prev_topology;
+#define ISP_CFG_NL	1
+#define ISP_CFG_N	2
+#define ISP_CFG_FL	4
+#define ISP_CFG_F	8
+
+	uint8_t		operating_mode;         /* F/W operating mode */
+#define LOOP      0
+#define P2P       1
+#define LOOP_P2P  2
+#define P2P_LOOP  3
+	uint8_t		interrupts_on;
+	uint32_t	isp_abort_cnt;
+#define PCI_DEVICE_ID_QLOGIC_ISP2532    0x2532
+#define PCI_DEVICE_ID_QLOGIC_ISP8432    0x8432
+#define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001
+#define PCI_DEVICE_ID_QLOGIC_ISP8031	0x8031
+#define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
+#define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
+#define PCI_DEVICE_ID_QLOGIC_ISP2271	0x2271
+#define PCI_DEVICE_ID_QLOGIC_ISP2261	0x2261
+
+	uint32_t	isp_type;
+#define DT_ISP2100                      BIT_0
+#define DT_ISP2200                      BIT_1
+#define DT_ISP2300                      BIT_2
+#define DT_ISP2312                      BIT_3
+#define DT_ISP2322                      BIT_4
+#define DT_ISP6312                      BIT_5
+#define DT_ISP6322                      BIT_6
+#define DT_ISP2422                      BIT_7
+#define DT_ISP2432                      BIT_8
+#define DT_ISP5422                      BIT_9
+#define DT_ISP5432                      BIT_10
+#define DT_ISP2532                      BIT_11
+#define DT_ISP8432                      BIT_12
+#define DT_ISP8001			BIT_13
+#define DT_ISP8021			BIT_14
+#define DT_ISP2031			BIT_15
+#define DT_ISP8031			BIT_16
+#define DT_ISPFX00			BIT_17
+#define DT_ISP8044			BIT_18
+#define DT_ISP2071			BIT_19
+#define DT_ISP2271			BIT_20
+#define DT_ISP2261			BIT_21
+#define DT_ISP_LAST			(DT_ISP2261 << 1)
+
+	uint32_t	device_type;
+#define DT_T10_PI                       BIT_25
+#define DT_IIDMA                        BIT_26
+#define DT_FWI2                         BIT_27
+#define DT_ZIO_SUPPORTED                BIT_28
+#define DT_OEM_001                      BIT_29
+#define DT_ISP2200A                     BIT_30
+#define DT_EXTENDED_IDS                 BIT_31
+
+#define DT_MASK(ha)     ((ha)->isp_type & (DT_ISP_LAST - 1))
+#define IS_QLA2100(ha)  (DT_MASK(ha) & DT_ISP2100)
+#define IS_QLA2200(ha)  (DT_MASK(ha) & DT_ISP2200)
+#define IS_QLA2300(ha)  (DT_MASK(ha) & DT_ISP2300)
+#define IS_QLA2312(ha)  (DT_MASK(ha) & DT_ISP2312)
+#define IS_QLA2322(ha)  (DT_MASK(ha) & DT_ISP2322)
+#define IS_QLA6312(ha)  (DT_MASK(ha) & DT_ISP6312)
+#define IS_QLA6322(ha)  (DT_MASK(ha) & DT_ISP6322)
+#define IS_QLA2422(ha)  (DT_MASK(ha) & DT_ISP2422)
+#define IS_QLA2432(ha)  (DT_MASK(ha) & DT_ISP2432)
+#define IS_QLA5422(ha)  (DT_MASK(ha) & DT_ISP5422)
+#define IS_QLA5432(ha)  (DT_MASK(ha) & DT_ISP5432)
+#define IS_QLA2532(ha)  (DT_MASK(ha) & DT_ISP2532)
+#define IS_QLA8432(ha)  (DT_MASK(ha) & DT_ISP8432)
+#define IS_QLA8001(ha)	(DT_MASK(ha) & DT_ISP8001)
+#define IS_QLA81XX(ha)	(IS_QLA8001(ha))
+#define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA8044(ha)  (DT_MASK(ha) & DT_ISP8044)
+#define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
+#define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
+#define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
+#define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
+#define IS_QLA2271(ha)	(DT_MASK(ha) & DT_ISP2271)
+#define IS_QLA2261(ha)	(DT_MASK(ha) & DT_ISP2261)
+
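+/*
+ * Note: DT_MASK() keeps only the chip-identity bits of isp_type (those
+ * below DT_ISP_LAST), so the IS_QLAxxxx() checks above and the family
+ * groupings below are insensitive to any stray high bits.  E.g. an
+ * isp_type of DT_ISP2532 makes IS_QLA2532(ha) true, and therefore
+ * IS_QLA25XX(ha) as well.
+ */
+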
+#define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
+			IS_QLA6312(ha) || IS_QLA6322(ha))
+#define IS_QLA24XX(ha)  (IS_QLA2422(ha) || IS_QLA2432(ha))
+#define IS_QLA54XX(ha)  (IS_QLA5422(ha) || IS_QLA5432(ha))
+#define IS_QLA25XX(ha)  (IS_QLA2532(ha))
+#define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
+#define IS_QLA84XX(ha)  (IS_QLA8432(ha))
+#define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
+#define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
+				IS_QLA84XX(ha))
+#define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
+				IS_QLA8031(ha) || IS_QLA8044(ha))
+#define IS_P3P_TYPE(ha)		(IS_QLA82XX(ha) || IS_QLA8044(ha))
+#define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
+				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA8044(ha) || IS_QLA27XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
+#define IS_NOPOLLING_TYPE(ha)	(IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
+#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
+#define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+
+#define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI)
+#define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)
+#define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)
+#define IS_ZIO_SUPPORTED(ha)    ((ha)->device_type & DT_ZIO_SUPPORTED)
+#define IS_OEM_001(ha)          ((ha)->device_type & DT_OEM_001)
+#define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)
+#define IS_CT6_SUPPORTED(ha)	((ha)->device_type & DT_CT6_SUPPORTED)
+#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha) || \
+				IS_QLA27XX(ha))
+#define IS_BIDI_CAPABLE(ha)	((IS_QLA25XX(ha) || IS_QLA2031(ha)))
+/* Bit 21 of fw_attributes decides the MCTP capabilities */
+#define IS_MCTP_CAPABLE(ha)	(IS_QLA2031(ha) && \
+				((ha)->fw_attributes_ext[0] & BIT_0))
+#define IS_PI_UNINIT_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_DIFB_DIX0_CAPABLE(ha)	(0)
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE(ha)	(IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+    (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
+#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
+#define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_FAWWN_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_EXCHG_OFFLD_CAPABLE(ha) \
+	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
+	(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+
+	/* HBA serial number */
+	uint8_t		serial0;
+	uint8_t		serial1;
+	uint8_t		serial2;
+
+	/* NVRAM configuration data */
+#define MAX_NVRAM_SIZE  4096
+#define VPD_OFFSET      (MAX_NVRAM_SIZE / 2)
+	uint16_t	nvram_size;
+	uint16_t	nvram_base;
+	void		*nvram;
+	uint16_t	vpd_size;
+	uint16_t	vpd_base;
+	void		*vpd;
+
+	uint16_t	loop_reset_delay;
+	uint8_t		retry_count;
+	uint8_t		login_timeout;
+	uint16_t	r_a_tov;
+	int		port_down_retry_count;
+	uint8_t		mbx_count;
+	uint8_t		aen_mbx_count;
+
+	uint32_t	login_retry_count;
+	/* SNS command interfaces. */
+	ms_iocb_entry_t		*ms_iocb;
+	dma_addr_t		ms_iocb_dma;
+	struct ct_sns_pkt	*ct_sns;
+	dma_addr_t		ct_sns_dma;
+	/* SNS command interfaces for 2200. */
+	struct sns_cmd_pkt	*sns_cmd;
+	dma_addr_t		sns_cmd_dma;
+
+#define SFP_DEV_SIZE    512
+#define SFP_BLOCK_SIZE  64
+	void		*sfp_data;
+	dma_addr_t	sfp_data_dma;
+
+#define XGMAC_DATA_SIZE	4096
+	void		*xgmac_data;
+	dma_addr_t	xgmac_data_dma;
+
+#define DCBX_TLV_DATA_SIZE 4096
+	void		*dcbx_tlv;
+	dma_addr_t	dcbx_tlv_dma;
+
+	struct task_struct	*dpc_thread;
+	uint8_t dpc_active;                  /* DPC routine is active */
+
+	dma_addr_t	gid_list_dma;
+	struct gid_list_info *gid_list;
+	int		gid_list_info_size;
+
+	/* Small DMA pool allocations -- maximum 256 bytes in length. */
+#define DMA_POOL_SIZE   256
+	struct dma_pool *s_dma_pool;
+
+	dma_addr_t	init_cb_dma;
+	init_cb_t	*init_cb;
+	int		init_cb_size;
+	dma_addr_t	ex_init_cb_dma;
+	struct ex_init_cb_81xx *ex_init_cb;
+
+	void		*async_pd;
+	dma_addr_t	async_pd_dma;
+
+#define ENABLE_EXTENDED_LOGIN	BIT_7
+
+	/* Extended Logins  */
+	void		*exlogin_buf;
+	dma_addr_t	exlogin_buf_dma;
+	int		exlogin_size;
+
+#define ENABLE_EXCHANGE_OFFLD	BIT_2
+
+	/* Exchange Offload */
+	void		*exchoffld_buf;
+	dma_addr_t	exchoffld_buf_dma;
+	int		exchoffld_size;
+	int 		exchoffld_count;
+
+	void		*swl;
+
+	/* These are used by mailbox operations. */
+	uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+	uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
+	uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
+
+	mbx_cmd_t	*mcp;
+	struct mbx_cmd_32	*mcp32;
+
+	unsigned long	mbx_cmd_flags;
+#define MBX_INTERRUPT		1
+#define MBX_INTR_WAIT		2
+#define MBX_UPDATE_FLASH_ACTIVE	3
+
+	struct mutex vport_lock;        /* Virtual port synchronization */
+	spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+	struct mutex mq_lock;        /* multi-queue synchronization */
+	struct completion mbx_cmd_comp; /* Serialize mbx access */
+	struct completion mbx_intr_comp;  /* Used for completion notification */
+	struct completion dcbx_comp;	/* For set port config notification */
+	struct completion lb_portup_comp; /* Used to wait for link up during
+					   * loopback */
+#define DCBX_COMP_TIMEOUT	20
+#define LB_PORTUP_COMP_TIMEOUT	10
+
+	int notify_dcbx_comp;
+	int notify_lb_portup_comp;
+	struct mutex selflogin_lock;
+
+	/* Basic firmware related information. */
+	uint16_t	fw_major_version;
+	uint16_t	fw_minor_version;
+	uint16_t	fw_subminor_version;
+	uint16_t	fw_attributes;
+	uint16_t	fw_attributes_h;
+	uint16_t	fw_attributes_ext[2];
+	uint32_t	fw_memory_size;
+	uint32_t	fw_transfer_size;
+	uint32_t	fw_srisc_address;
+#define RISC_START_ADDRESS_2100 0x1000
+#define RISC_START_ADDRESS_2300 0x800
+#define RISC_START_ADDRESS_2400 0x100000
+
+	uint16_t	orig_fw_tgt_xcb_count;
+	uint16_t	cur_fw_tgt_xcb_count;
+	uint16_t	orig_fw_xcb_count;
+	uint16_t	cur_fw_xcb_count;
+	uint16_t	orig_fw_iocb_count;
+	uint16_t	cur_fw_iocb_count;
+	uint16_t	fw_max_fcf_count;
+
+	uint32_t	fw_shared_ram_start;
+	uint32_t	fw_shared_ram_end;
+	uint32_t	fw_ddr_ram_start;
+	uint32_t	fw_ddr_ram_end;
+
+	uint16_t	fw_options[16];         /* slots: 1,2,3,10,11 */
+	uint8_t		fw_seriallink_options[4];
+	uint16_t	fw_seriallink_options24[4];
+
+	uint8_t		mpi_version[3];
+	uint32_t	mpi_capabilities;
+	uint8_t		phy_version[3];
+	uint8_t		pep_version[3];
+
+	/* Firmware dump template */
+	void		*fw_dump_template;
+	uint32_t	fw_dump_template_len;
+	/* Firmware dump information. */
+	struct qla2xxx_fw_dump *fw_dump;
+	uint32_t	fw_dump_len;
+	int		fw_dumped;
+	unsigned long	fw_dump_cap_flags;
+#define RISC_PAUSE_CMPL		0
+#define DMA_SHUTDOWN_CMPL	1
+#define ISP_RESET_CMPL		2
+#define RISC_RDY_AFT_RESET	3
+#define RISC_SRAM_DUMP_CMPL	4
+#define RISC_EXT_MEM_DUMP_CMPL	5
+#define ISP_MBX_RDY		6
+#define ISP_SOFT_RESET_CMPL	7
+	int		fw_dump_reading;
+	int		prev_minidump_failed;
+	dma_addr_t	eft_dma;
+	void		*eft;
+/* Current size of mctp dump is 0x086064 bytes */
+#define MCTP_DUMP_SIZE  0x086064
+	dma_addr_t	mctp_dump_dma;
+	void		*mctp_dump;
+	int		mctp_dumped;
+	int		mctp_dump_reading;
+	uint32_t	chain_offset;
+	struct dentry *dfs_dir;
+	struct dentry *dfs_fce;
+	struct dentry *dfs_tgt_counters;
+	struct dentry *dfs_fw_resource_cnt;
+
+	dma_addr_t	fce_dma;
+	void		*fce;
+	uint32_t	fce_bufs;
+	uint16_t	fce_mb[8];
+	uint64_t	fce_wr, fce_rd;
+	struct mutex	fce_mutex;
+
+	uint32_t	pci_attr;
+	uint16_t	chip_revision;
+
+	uint16_t	product_id[4];
+
+	uint8_t		model_number[16+1];
+#define BINZERO		"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+	char		model_desc[80];
+	uint8_t		adapter_id[16+1];
+
+	/* Option ROM information. */
+	char		*optrom_buffer;
+	uint32_t	optrom_size;
+	int		optrom_state;
+#define QLA_SWAITING	0
+#define QLA_SREADING	1
+#define QLA_SWRITING	2
+	uint32_t	optrom_region_start;
+	uint32_t	optrom_region_size;
+	struct mutex	optrom_mutex;
+
+/* PCI expansion ROM image information. */
+#define ROM_CODE_TYPE_BIOS	0
+#define ROM_CODE_TYPE_FCODE	1
+#define ROM_CODE_TYPE_EFI	3
+	uint8_t 	bios_revision[2];
+	uint8_t 	efi_revision[2];
+	uint8_t 	fcode_revision[16];
+	uint32_t	fw_revision[4];
+
+	uint32_t	gold_fw_version[4];
+
+	/* Offsets for flash/nvram access (set to ~0 if not used). */
+	uint32_t	flash_conf_off;
+	uint32_t	flash_data_off;
+	uint32_t	nvram_conf_off;
+	uint32_t	nvram_data_off;
+
+	uint32_t	fdt_wrt_disable;
+	uint32_t	fdt_wrt_enable;
+	uint32_t	fdt_erase_cmd;
+	uint32_t	fdt_block_size;
+	uint32_t	fdt_unprotect_sec_cmd;
+	uint32_t	fdt_protect_sec_cmd;
+	uint32_t	fdt_wrt_sts_reg_cmd;
+
+	uint32_t        flt_region_flt;
+	uint32_t        flt_region_fdt;
+	uint32_t        flt_region_boot;
+	uint32_t        flt_region_boot_sec;
+	uint32_t        flt_region_fw;
+	uint32_t        flt_region_fw_sec;
+	uint32_t        flt_region_vpd_nvram;
+	uint32_t        flt_region_vpd;
+	uint32_t        flt_region_vpd_sec;
+	uint32_t        flt_region_nvram;
+	uint32_t        flt_region_npiv_conf;
+	uint32_t	flt_region_gold_fw;
+	uint32_t	flt_region_fcp_prio;
+	uint32_t	flt_region_bootload;
+	uint32_t	flt_region_img_status_pri;
+	uint32_t	flt_region_img_status_sec;
+	uint8_t         active_image;
+
+	/* Needed for BEACON */
+	uint16_t        beacon_blink_led;
+	uint8_t         beacon_color_state;
+#define QLA_LED_GRN_ON		0x01
+#define QLA_LED_YLW_ON		0x02
+#define QLA_LED_ABR_ON		0x04
+#define QLA_LED_ALL_ON		0x07	/* yellow, green, amber. */
+					/* ISP2322: red, green, amber. */
+	uint16_t        zio_mode;
+	uint16_t        zio_timer;
+
+	struct qla_msix_entry *msix_entries;
+
+	struct list_head        vp_list;        /* list of VP */
+	unsigned long   vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
+			sizeof(unsigned long)];
+	uint16_t        num_vhosts;     /* number of vports created */
+	uint16_t        num_vsans;      /* number of VSANs created */
+	uint16_t        max_npiv_vports;        /* 63 or 125 per topology */
+	int             cur_vport_count;
+
+	struct qla_chip_state_84xx *cs84xx;
+	struct isp_operations *isp_ops;
+	struct workqueue_struct *wq;
+	struct qlfc_fw fw_buf;
+
+	/* FCP_CMND priority support */
+	struct qla_fcp_prio_cfg *fcp_prio_cfg;
+
+	struct dma_pool *dl_dma_pool;
+#define DSD_LIST_DMA_POOL_SIZE  512
+
+	struct dma_pool *fcp_cmnd_dma_pool;
+	mempool_t       *ctx_mempool;
+#define FCP_CMND_DMA_POOL_SIZE 512
+
+	void __iomem	*nx_pcibase;		/* Base I/O address */
+	void __iomem	*nxdb_rd_ptr;		/* Doorbell read pointer */
+	void __iomem	*nxdb_wr_ptr;		/* Doorbell write pointer */
+
+	uint32_t	crb_win;
+	uint32_t	curr_window;
+	uint32_t	ddr_mn_window;
+	unsigned long	mn_win_crb;
+	unsigned long	ms_win_crb;
+	int		qdr_sn_window;
+	uint32_t	fcoe_dev_init_timeout;
+	uint32_t	fcoe_reset_timeout;
+	rwlock_t	hw_lock;
+	uint16_t	portnum;		/* port number */
+	int		link_width;
+	struct fw_blob	*hablob;
+	struct qla82xx_legacy_intr_set nx_legacy_intr;
+
+	uint16_t	gbl_dsd_inuse;
+	uint16_t	gbl_dsd_avail;
+	struct list_head gbl_dsd_list;
+#define NUM_DSD_CHAIN 4096
+
+	uint8_t fw_type;
+	__le32 file_prd_off;	/* File firmware product offset */
+
+	uint32_t	md_template_size;
+	void		*md_tmplt_hdr;
+	dma_addr_t      md_tmplt_hdr_dma;
+	void            *md_dump;
+	uint32_t	md_dump_size;
+
+	void		*loop_id_map;
+
+	/* QLA83XX IDC specific fields */
+	uint32_t	idc_audit_ts;
+	uint32_t	idc_extend_tmo;
+
+	/* DPC low-priority workqueue */
+	struct workqueue_struct *dpc_lp_wq;
+	struct work_struct idc_aen;
+	/* DPC high-priority workqueue */
+	struct workqueue_struct *dpc_hp_wq;
+	struct work_struct nic_core_reset;
+	struct work_struct idc_state_handler;
+	struct work_struct nic_core_unrecoverable;
+	struct work_struct board_disable;
+
+	struct mr_data_fx00 mr;
+
+	struct qlt_hw_data tgt;
+	int	allow_cna_fw_dump;
+	uint32_t fw_ability_mask;
+	uint16_t min_link_speed;
+	uint16_t max_speed_sup;
+
+	atomic_t        nvme_active_aen_cnt;
+	uint16_t        nvme_last_rptd_aen;             /* Last recorded aen count */
+};
+
+#define FW_ABILITY_MAX_SPEED_MASK	0xFUL
+#define FW_ABILITY_MAX_SPEED_16G	0x0
+#define FW_ABILITY_MAX_SPEED_32G	0x1
+#define FW_ABILITY_MAX_SPEED(ha)	\
+	(ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK)
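+
+/*
+ * Example: fw_ability_mask == 0x1 yields FW_ABILITY_MAX_SPEED(ha) ==
+ * FW_ABILITY_MAX_SPEED_32G; any bits above the low nibble are ignored
+ * by the 0xF mask.
+ */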
+
+/*
+ * QLogic SCSI host structure
+ */
+typedef struct scsi_qla_host {
+	struct list_head list;
+	struct list_head vp_fcports;	/* list of fcports */
+	struct list_head work_list;
+	spinlock_t work_lock;
+	struct work_struct iocb_work;
+
+	/* Commonly used flags and state information. */
+	struct Scsi_Host *host;
+	unsigned long	host_no;
+	uint8_t		host_str[16];
+
+	volatile struct {
+		uint32_t	init_done		:1;
+		uint32_t	online			:1;
+		uint32_t	reset_active		:1;
+
+		uint32_t	management_server_logged_in :1;
+		uint32_t	process_response_queue	:1;
+		uint32_t	difdix_supported:1;
+		uint32_t	delete_progress:1;
+
+		uint32_t	fw_tgt_reported:1;
+		uint32_t	bbcr_enable:1;
+		uint32_t	qpairs_available:1;
+		uint32_t	qpairs_req_created:1;
+		uint32_t	qpairs_rsp_created:1;
+		uint32_t	nvme_enabled:1;
+	} flags;
+
+	atomic_t	loop_state;
+#define LOOP_TIMEOUT	1
+#define LOOP_DOWN	2
+#define LOOP_UP		3
+#define LOOP_UPDATE	4
+#define LOOP_READY	5
+#define LOOP_DEAD	6
+
+	unsigned long   relogin_jif;
+	unsigned long   dpc_flags;
+#define RESET_MARKER_NEEDED	0	/* Send marker to ISP. */
+#define RESET_ACTIVE		1
+#define ISP_ABORT_NEEDED	2	/* Initiate ISP abort. */
+#define ABORT_ISP_ACTIVE	3	/* ISP abort in progress. */
+#define LOOP_RESYNC_NEEDED	4	/* Device Resync needed. */
+#define LOOP_RESYNC_ACTIVE	5
+#define LOCAL_LOOP_UPDATE	6	/* Perform a local loop update. */
+#define RSCN_UPDATE		7	/* Perform an RSCN update. */
+#define RELOGIN_NEEDED		8
+#define REGISTER_FC4_NEEDED	9	/* SNS FC4 registration required. */
+#define ISP_ABORT_RETRY		10	/* ISP aborted. */
+#define BEACON_BLINK_NEEDED	11
+#define REGISTER_FDMI_NEEDED	12
+#define FCPORT_UPDATE_NEEDED	13
+#define VP_DPC_NEEDED		14	/* wake up for VP dpc handling */
+#define UNLOADING		15
+#define NPIV_CONFIG_NEEDED	16
+#define ISP_UNRECOVERABLE	17
+#define FCOE_CTX_RESET_NEEDED	18	/* Initiate FCoE context reset */
+#define MPI_RESET_NEEDED	19	/* Initiate MPI FW reset */
+#define ISP_QUIESCE_NEEDED	20	/* Driver needs some quiescence */
+#define FREE_BIT 21
+#define PORT_UPDATE_NEEDED	22
+#define FX00_RESET_RECOVERY	23
+#define FX00_TARGET_SCAN	24
+#define FX00_CRITEMP_RECOVERY	25
+#define FX00_HOST_INFO_RESEND	26
+#define QPAIR_ONLINE_CHECK_NEEDED	27
+#define SET_ZIO_THRESHOLD_NEEDED	28
+#define DETECT_SFP_CHANGE	29
+
+	unsigned long	pci_flags;
+#define PFLG_DISCONNECTED	0	/* PCI device removed */
+#define PFLG_DRIVER_REMOVING	1	/* PCI driver .remove */
+#define PFLG_DRIVER_PROBING	2	/* PCI driver .probe */
+
+	uint32_t	device_flags;
+#define SWITCH_FOUND		BIT_0
+#define DFLG_NO_CABLE		BIT_1
+#define DFLG_DEV_FAILED		BIT_5
+
+	/* ISP configuration data. */
+	uint16_t	loop_id;		/* Host adapter loop id */
+	uint16_t        self_login_loop_id;     /* host adapter loop id,
+						 * obtained on self login
+						 */
+	fc_port_t       bidir_fcport;		/* fcport used for bidir cmnds;
+						 * no need to allocate one for
+						 * each command
+						 */
+
+	port_id_t	d_id;			/* Host adapter port id */
+	uint8_t		marker_needed;
+	uint16_t	mgmt_svr_loop_id;
+
+	/* Timeout timers. */
+	uint8_t         loop_down_abort_time;    /* port down timer */
+	atomic_t        loop_down_timer;         /* loop down timer */
+	uint8_t         link_down_timeout;       /* link down timeout */
+
+	uint32_t        timer_active;
+	struct timer_list        timer;
+
+	uint8_t		node_name[WWN_SIZE];
+	uint8_t		port_name[WWN_SIZE];
+	uint8_t		fabric_node_name[WWN_SIZE];
+
+	struct		nvme_fc_local_port *nvme_local_port;
+	struct completion nvme_del_done;
+	struct list_head nvme_rport_list;
+	atomic_t 	nvme_active_aen_cnt;
+	uint16_t	nvme_last_rptd_aen;
+
+	uint16_t	fcoe_vlan_id;
+	uint16_t	fcoe_fcf_idx;
+	uint8_t		fcoe_vn_port_mac[6];
+
+	/* list of commands waiting on workqueue */
+	struct list_head	qla_cmd_list;
+	struct list_head	qla_sess_op_cmd_list;
+	struct list_head	unknown_atio_list;
+	spinlock_t		cmd_list_lock;
+	struct delayed_work	unknown_atio_work;
+
+	/* Counter to detect races between ELS and RSCN events */
+	atomic_t		generation_tick;
+	/* Time when global fcport update has been scheduled */
+	int			total_fcport_update_gen;
+	/* List of pending LOGOs, protected by tgt_mutex */
+	struct list_head	logo_list;
+	/* List of pending PLOGI acks, protected by hw lock */
+	struct list_head	plogi_ack_list;
+
+	struct list_head	qp_list;
+
+	uint32_t	vp_abort_cnt;
+
+	struct fc_vport	*fc_vport;	/* holds fc_vport * for each vport */
+	uint16_t        vp_idx;		/* vport ID */
+	struct qla_qpair *qpair;	/* base qpair */
+
+	unsigned long		vp_flags;
+#define VP_IDX_ACQUIRED		0	/* bit no 0 */
+#define VP_CREATE_NEEDED	1
+#define VP_BIND_NEEDED		2
+#define VP_DELETE_NEEDED	3
+#define VP_SCR_NEEDED		4	/* State Change Request registration */
+#define VP_CONFIG_OK		5	/* Flag to cfg VP, if FW is ready */
+	atomic_t 		vp_state;
+#define VP_OFFLINE		0
+#define VP_ACTIVE		1
+#define VP_FAILED		2
+/* #define VP_DISABLE		3 */
+	uint16_t 	vp_err_state;
+	uint16_t	vp_prev_err_state;
+#define VP_ERR_UNKWN		0
+#define VP_ERR_PORTDWN		1
+#define VP_ERR_FAB_UNSUPPORTED	2
+#define VP_ERR_FAB_NORESOURCES	3
+#define VP_ERR_FAB_LOGOUT	4
+#define VP_ERR_ADAP_NORESOURCES	5
+	struct qla_hw_data *hw;
+	struct scsi_qlt_host vha_tgt;
+	struct req_que *req;
+	int		fw_heartbeat_counter;
+	int		seconds_since_last_heartbeat;
+	struct fc_host_statistics fc_host_stat;
+	struct qla_statistics qla_stats;
+	struct bidi_statistics bidi_stats;
+	atomic_t	vref_count;
+	struct qla8044_reset_template reset_tmplt;
+	uint16_t	bbcr;
+	struct name_list_extended gnl;
+	/* Count of active session/fcport */
+	int fcport_count;
+	wait_queue_head_t fcport_waitQ;
+	wait_queue_head_t vref_waitq;
+	uint8_t min_link_speed_feat;
+	struct list_head gpnid_list;
+} scsi_qla_host_t;
+
+struct qla27xx_image_status {
+	uint8_t image_status_mask;
+	uint16_t generation_number;
+	uint8_t reserved[3];
+	uint8_t ver_minor;
+	uint8_t ver_major;
+	uint32_t checksum;
+	uint32_t signature;
+} __packed;
+
+#define SET_VP_IDX	1
+#define SET_AL_PA	2
+#define RESET_VP_IDX	3
+#define RESET_AL_PA	4
+struct qla_tgt_vp_map {
+	uint8_t	idx;
+	scsi_qla_host_t *vha;
+};
+
+struct qla2_sgx {
+	dma_addr_t		dma_addr;	/* OUT */
+	uint32_t		dma_len;	/* OUT */
+
+	uint32_t		tot_bytes;	/* IN */
+	struct scatterlist	*cur_sg;	/* IN */
+
+	/* for bookkeeping, bzero on initial invocation */
+	uint32_t		bytes_consumed;
+	uint32_t		num_bytes;
+	uint32_t		tot_partial;
+
+	/* for debugging */
+	uint32_t		num_sg;
+	srb_t			*sp;
+};
+
+#define QLA_FW_STARTED(_ha) {			\
+	int i;					\
+	_ha->flags.fw_started = 1;		\
+	_ha->base_qpair->fw_started = 1;	\
+	for (i = 0; i < _ha->max_qpairs; i++) {	\
+	if (_ha->queue_pair_map[i])	\
+	_ha->queue_pair_map[i]->fw_started = 1;	\
+	}					\
+}
+
+#define QLA_FW_STOPPED(_ha) {			\
+	int i;					\
+	_ha->flags.fw_started = 0;		\
+	_ha->base_qpair->fw_started = 0;	\
+	for (i = 0; i < _ha->max_qpairs; i++) {	\
+	if (_ha->queue_pair_map[i])	\
+	_ha->queue_pair_map[i]->fw_started = 0;	\
+	}					\
+}
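+
+/*
+ * These are statement macros: they flip the firmware-started flag on the
+ * HBA-wide flags, the base qpair, and every allocated queue pair in one
+ * shot.  Illustrative call site (the real callers live in the .c files):
+ *
+ *	if (rval == QLA_SUCCESS)
+ *		QLA_FW_STARTED(ha);
+ */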
+
+/*
+ * Macros to help code, maintain, etc.
+ */
+#define LOOP_TRANSITION(ha) \
+	(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+	 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
+	 atomic_read(&ha->loop_state) == LOOP_DOWN)
+
+#define STATE_TRANSITION(ha) \
+		(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+			 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {		\
+	atomic_inc(&__vha->vref_count);			\
+	mb();						\
+	if (__vha->flags.delete_progress) {		\
+		atomic_dec(&__vha->vref_count);		\
+		wake_up(&__vha->vref_waitq);		\
+		__bail = 1;				\
+	} else {					\
+		__bail = 0;				\
+	}						\
+} while (0)
+
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do {		\
+	atomic_dec(&__vha->vref_count);			\
+	wake_up(&__vha->vref_waitq);			\
+} while (0)
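+
+/*
+ * Reference-count sketch: callers bracket work that must not race with
+ * vport deletion, bailing out early if a delete is already in flight:
+ *
+ *	int bail;
+ *
+ *	QLA_VHA_MARK_BUSY(vha, bail);
+ *	if (bail)
+ *		return;
+ *	... do work against vha ...
+ *	QLA_VHA_MARK_NOT_BUSY(vha);
+ */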
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do {	\
+	atomic_inc(&__qpair->ref_count);		\
+	mb();						\
+	if (__qpair->delete_in_progress) {		\
+		atomic_dec(&__qpair->ref_count);	\
+		__bail = 1;				\
+	} else {					\
+	       __bail = 0;				\
+	}						\
+} while (0)
+
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair)		\
+	atomic_dec(&__qpair->ref_count);
+
+#define QLA_ENA_CONF(_ha) {\
+    int i;\
+    _ha->base_qpair->enable_explicit_conf = 1;	\
+    for (i = 0; i < _ha->max_qpairs; i++) {	\
+	if (_ha->queue_pair_map[i])		\
+	    _ha->queue_pair_map[i]->enable_explicit_conf = 1; \
+    }						\
+}
+
+#define QLA_DIS_CONF(_ha) {\
+    int i;\
+    _ha->base_qpair->enable_explicit_conf = 0;	\
+    for (i = 0; i < _ha->max_qpairs; i++) {	\
+	if (_ha->queue_pair_map[i])		\
+	    _ha->queue_pair_map[i]->enable_explicit_conf = 0; \
+    }						\
+}
+
+/*
+ * qla2x00 local function return status codes
+ */
+#define MBS_MASK		0x3fff
+
+#define QLA_SUCCESS		(MBS_COMMAND_COMPLETE & MBS_MASK)
+#define QLA_INVALID_COMMAND	(MBS_INVALID_COMMAND & MBS_MASK)
+#define QLA_INTERFACE_ERROR	(MBS_HOST_INTERFACE_ERROR & MBS_MASK)
+#define QLA_TEST_FAILED		(MBS_TEST_FAILED & MBS_MASK)
+#define QLA_COMMAND_ERROR	(MBS_COMMAND_ERROR & MBS_MASK)
+#define QLA_PARAMETER_ERROR	(MBS_COMMAND_PARAMETER_ERROR & MBS_MASK)
+#define QLA_PORT_ID_USED	(MBS_PORT_ID_USED & MBS_MASK)
+#define QLA_LOOP_ID_USED	(MBS_LOOP_ID_USED & MBS_MASK)
+#define QLA_ALL_IDS_IN_USE	(MBS_ALL_IDS_IN_USE & MBS_MASK)
+#define QLA_NOT_LOGGED_IN	(MBS_NOT_LOGGED_IN & MBS_MASK)
+
+#define QLA_FUNCTION_TIMEOUT		0x100
+#define QLA_FUNCTION_PARAMETER_ERROR	0x101
+#define QLA_FUNCTION_FAILED		0x102
+#define QLA_MEMORY_ALLOC_FAILED		0x103
+#define QLA_LOCK_TIMEOUT		0x104
+#define QLA_ABORTED			0x105
+#define QLA_SUSPENDED			0x106
+#define QLA_BUSY			0x107
+#define QLA_ALREADY_REGISTERED		0x109
+
+#define NVRAM_DELAY()		udelay(10)
+
+/*
+ * Flash support definitions
+ */
+#define OPTROM_SIZE_2300	0x20000
+#define OPTROM_SIZE_2322	0x100000
+#define OPTROM_SIZE_24XX	0x100000
+#define OPTROM_SIZE_25XX	0x200000
+#define OPTROM_SIZE_81XX	0x400000
+#define OPTROM_SIZE_82XX	0x800000
+#define OPTROM_SIZE_83XX	0x1000000
+
+#define OPTROM_BURST_SIZE	0x1000
+#define OPTROM_BURST_DWORDS	(OPTROM_BURST_SIZE / 4)
+
+#define	QLA_DSDS_PER_IOCB	37
+
+#define CMD_SP(Cmnd)		((Cmnd)->SCp.ptr)
+
+#define QLA_SG_ALL	1024
+
+enum nexus_wait_type {
+	WAIT_HOST = 0,
+	WAIT_TARGET,
+	WAIT_LUN,
+};
+
+/* Refer to SNIA SFF 8247 */
+struct sff_8247_a0 {
+	u8 txid;	/* transceiver id */
+	u8 ext_txid;
+	u8 connector;
+	/* compliance code */
+	u8 eth_infi_cc3;	/* Ethernet, Infiniband */
+	u8 sonet_cc4[2];
+	u8 eth_cc6;
+	/* link length */
+#define FC_LL_VL BIT_7	/* very long */
+#define FC_LL_S  BIT_6	/* Short */
+#define FC_LL_I  BIT_5	/* Intermediate */
+#define FC_LL_L  BIT_4	/* Long */
+#define FC_LL_M  BIT_3	/* Medium */
+#define FC_LL_SA BIT_2	/* ShortWave laser */
+#define FC_LL_LC BIT_1	/* LongWave laser */
+#define FC_LL_EL BIT_0	/* Electrical intra-enclosure */
+	u8 fc_ll_cc7;
+	/* FC technology */
+#define FC_TEC_EL BIT_7	/* Electrical inter enclosure */
+#define FC_TEC_SN BIT_6	/* short wave w/o OFC */
+#define FC_TEC_SL BIT_5	/* short wave with OFC */
+#define FC_TEC_LL BIT_4	/* Longwave Laser */
+#define FC_TEC_ACT BIT_3	/* Active cable */
+#define FC_TEC_PAS BIT_2	/* Passive cable */
+	u8 fc_tec_cc8;
+	/* Transmission Media */
+#define FC_MED_TW BIT_7	/* Twin Ax */
+#define FC_MED_TP BIT_6	/* Twisted Pair */
+#define FC_MED_MI BIT_5	/* Miniature Coax */
+#define FC_MED_TV BIT_4	/* Video Coax */
+#define FC_MED_M6 BIT_3	/* Multimode, 62.5um */
+#define FC_MED_M5 BIT_2	/* Multimode, 50um */
+#define FC_MED_SM BIT_0	/* Single Mode */
+	u8 fc_med_cc9;
+	/* speed FC_SP_12: 12*100M = 1200 MB/s */
+#define FC_SP_12 BIT_7
+#define FC_SP_8  BIT_6
+#define FC_SP_16 BIT_5
+#define FC_SP_4  BIT_4
+#define FC_SP_32 BIT_3
+#define FC_SP_2  BIT_2
+#define FC_SP_1  BIT_0
+	u8 fc_sp_cc10;
+	u8 encode;
+	u8 bitrate;
+	u8 rate_id;
+	u8 length_km;		/* offset 14/0eh */
+	u8 length_100m;
+	u8 length_50um_10m;
+	u8 length_62um_10m;
+	u8 length_om4_10m;
+	u8 length_om3_10m;
+#define SFF_VEN_NAME_LEN 16
+	u8 vendor_name[SFF_VEN_NAME_LEN];	/* offset 20/14h */
+	u8 tx_compat;
+	u8 vendor_oui[3];
+#define SFF_PART_NAME_LEN 16
+	u8 vendor_pn[SFF_PART_NAME_LEN];	/* part number */
+	u8 vendor_rev[4];
+	u8 wavelength[2];
+	u8 resv;
+	u8 cc_base;
+	u8 options[2];	/* offset 64 */
+	u8 br_max;
+	u8 br_min;
+	u8 vendor_sn[16];
+	u8 date_code[8];
+	u8 diag;
+	u8 enh_options;
+	u8 sff_revision;
+	u8 cc_ext;
+	u8 vendor_specific[32];
+	u8 resv2[128];
+};
+
+#define AUTO_DETECT_SFP_SUPPORT(_vha)\
+	(ql2xautodetectsfp && !_vha->vp_idx &&		\
+	(IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
+	IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
+
+#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
+	(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+
+#include "qla_target.h"
+#include "qla_gbl.h"
+#include "qla_dbg.h"
+#include "qla_inline.h"
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_devtbl.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_devtbl.h
new file mode 100644
index 0000000..ffb9694
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_devtbl.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define QLA_MODEL_NAMES		0x5C
+
+/*
+ * Adapter model names and descriptions.
+ */
+static char *qla2x00_model_name[QLA_MODEL_NAMES*2] = {
+	"QLA2340",	"133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x100 */
+	"QLA2342",	"133MHz PCI-X to 2Gb FC, Dual Channel",		/* 0x101 */
+	"QLA2344",	"133MHz PCI-X to 2Gb FC, Quad Channel",		/* 0x102 */
+	"QCP2342",	"cPCI to 2Gb FC, Dual Channel",			/* 0x103 */
+	"QSB2340",	"SBUS to 2Gb FC, Single Channel",		/* 0x104 */
+	"QSB2342",	"SBUS to 2Gb FC, Dual Channel",			/* 0x105 */
+	"QLA2310",	"Sun 66MHz PCI-X to 2Gb FC, Single Channel",	/* 0x106 */
+	"QLA2332",	"Sun 66MHz PCI-X to 2Gb FC, Single Channel",	/* 0x107 */
+	"QCP2332",	"Sun cPCI to 2Gb FC, Dual Channel",		/* 0x108 */
+	"QCP2340",	"cPCI to 2Gb FC, Single Channel",		/* 0x109 */
+	"QLA2342",	"Sun 133MHz PCI-X to 2Gb FC, Dual Channel",	/* 0x10a */
+	"QCP2342",	"Sun - cPCI to 2Gb FC, Dual Channel",		/* 0x10b */
+	"QLA2350",	"133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x10c */
+	"QLA2352",	"133MHz PCI-X to 2Gb FC, Dual Channel",		/* 0x10d */
+	"QLA2352",	"Sun 133MHz PCI-X to 2Gb FC, Dual Channel",	/* 0x10e */
+	" ",		" ",						/* 0x10f */
+	" ",		" ",						/* 0x110 */
+	" ",		" ",						/* 0x111 */
+	" ",		" ",						/* 0x112 */
+	" ",		" ",						/* 0x113 */
+	" ",		" ",						/* 0x114 */
+	"QLA2360",	"133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x115 */
+	"QLA2362",	"133MHz PCI-X to 2Gb FC, Dual Channel",		/* 0x116 */
+	"QLE2360",	"PCI-Express to 2Gb FC, Single Channel",	/* 0x117 */
+	"QLE2362",	"PCI-Express to 2Gb FC, Dual Channel",		/* 0x118 */
+	"QLA200",	"133MHz PCI-X to 2Gb FC Optical",		/* 0x119 */
+	" ",		" ",						/* 0x11a */
+	" ",		" ",						/* 0x11b */
+	"QLA200P",	"133MHz PCI-X to 2Gb FC SFP",			/* 0x11c */
+	" ",		" ",						/* 0x11d */
+	" ",		" ",						/* 0x11e */
+	" ",		" ",						/* 0x11f */
+	" ",		" ",						/* 0x120 */
+	" ",		" ",						/* 0x121 */
+	" ",		" ",						/* 0x122 */
+	" ",		" ",						/* 0x123 */
+	" ",		" ",						/* 0x124 */
+	" ",		" ",						/* 0x125 */
+	" ",		" ",						/* 0x126 */
+	" ",		" ",						/* 0x127 */
+	" ",		" ",						/* 0x128 */
+	" ",		" ",						/* 0x129 */
+	" ",		" ",						/* 0x12a */
+	" ",		" ",						/* 0x12b */
+	" ",		" ",						/* 0x12c */
+	" ",		" ",						/* 0x12d */
+	" ",		" ",						/* 0x12e */
+	"QLA210",	"133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x12f */
+	"EMC 250",	"133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x130 */
+	"HP A7538A",	"HP 1p2g PCI-X to 2Gb FC, Single Channel",	/* 0x131 */
+	"QLA210",	"Sun 133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x132 */
+	"QLA2460",	"PCI-X 2.0 to 4Gb FC, Single Channel",		/* 0x133 */
+	"QLA2462",	"PCI-X 2.0 to 4Gb FC, Dual Channel",		/* 0x134 */
+	"QMC2462",	"IBM eServer BC 4Gb FC Expansion Card",		/* 0x135 */
+	"QMC2462S",	"IBM eServer BC 4Gb FC Expansion Card SFF",	/* 0x136 */
+	"QLE2460",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x137 */
+	"QLE2462",	"PCI-Express to 4Gb FC, Dual Channel",		/* 0x138 */
+	"QME2462",	"Dell BS PCI-Express to 4Gb FC, Dual Channel",	/* 0x139 */
+	" ",		" ",						/* 0x13a */
+	" ",		" ",						/* 0x13b */
+	" ",		" ",						/* 0x13c */
+	"QEM2462",	"Sun Server I/O Module 4Gb FC, Dual Channel",	/* 0x13d */
+	"QLE210",	"PCI-Express to 2Gb FC, Single Channel",	/* 0x13e */
+	"QLE220",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x13f */
+	"QLA2460",	"Sun PCI-X 2.0 to 4Gb FC, Single Channel",	/* 0x140 */
+	"QLA2462",	"Sun PCI-X 2.0 to 4Gb FC, Dual Channel",	/* 0x141 */
+	"QLE2460",	"Sun PCI-Express to 2Gb FC, Single Channel",	/* 0x142 */
+	"QLE2462",	"Sun PCI-Express to 4Gb FC, Single Channel",	/* 0x143 */
+	"QEM2462",	"Server I/O Module 4Gb FC, Dual Channel",	/* 0x144 */
+	"QLE2440",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x145 */
+	"QLE2464",	"PCI-Express to 4Gb FC, Quad Channel",		/* 0x146 */
+	"QLA2440",	"PCI-X 2.0 to 4Gb FC, Single Channel",		/* 0x147 */
+	"HP AE369A",	"PCI-X 2.0 to 4Gb FC, Dual Channel",		/* 0x148 */
+	"QLA2340",	"Sun 133MHz PCI-X to 2Gb FC, Single Channel",	/* 0x149 */
+	" ",		" ",						/* 0x14a */
+	" ",		" ",						/* 0x14b */
+	"QMC2432M",	"IBM eServer BC 4Gb FC Expansion Card CFFE",	/* 0x14c */
+	"QMC2422M",	"IBM eServer BC 4Gb FC Expansion Card CFFX",	/* 0x14d */
+	"QLE220",	"Sun PCI-Express to 4Gb FC, Single Channel",	/* 0x14e */
+	" ",		" ",						/* 0x14f */
+	" ",		" ",						/* 0x150 */
+	" ",		" ",						/* 0x151 */
+	"QME2462",	"PCI-Express to 4Gb FC, Dual Channel Mezz HBA",	/* 0x152 */
+	"QMH2462",	"PCI-Express to 4Gb FC, Dual Channel Mezz HBA",	/* 0x153 */
+	" ",		" ",						/* 0x154 */
+	"QLE220",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x155 */
+	"QLE220",	"PCI-Express to 4Gb FC, Single Channel",	/* 0x156 */
+	" ",		" ",						/* 0x157 */
+	" ",		" ",						/* 0x158 */
+	" ",		" ",						/* 0x159 */
+	" ",		" ",						/* 0x15a */
+	"QME2472",	"Dell BS PCI-Express to 4Gb FC, Dual Channel",	/* 0x15b */
+};
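+
+/*
+ * Lookup sketch: the table stores name/description pairs, apparently
+ * indexed from board type 0x100 per the trailing comments, so a board
+ * type bt would resolve to qla2x00_model_name[(bt - 0x100) * 2] with
+ * the matching description at [(bt - 0x100) * 2 + 1].
+ */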
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dfs.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dfs.c
new file mode 100644
index 0000000..d231e71
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_dfs.c
@@ -0,0 +1,560 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *qla2x00_dfs_root;
+static atomic_t qla2x00_dfs_root_count;
+
+static int
+qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
+{
+	scsi_qla_host_t *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	struct fc_port *sess = NULL;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+	seq_printf(s, "%s\n", vha->host_str);
+	if (tgt) {
+		seq_puts(s, "Port ID   Port Name                Handle\n");
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		list_for_each_entry(sess, &vha->vp_fcports, list)
+			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
+			    sess->d_id.b.domain, sess->d_id.b.area,
+			    sess->d_id.b.al_pa, sess->port_name,
+			    sess->loop_id);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	}
+
+	return 0;
+}
+
+static int
+qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
+}
+
+static const struct file_operations dfs_tgt_sess_ops = {
+	.open		= qla2x00_dfs_tgt_sess_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
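+
+/*
+ * These fops follow the standard single_open() seq_file pattern; the
+ * dentry itself is created later in this file, along the lines of
+ *
+ *	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+ *	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+ *
+ * (sketch only; see the dfs setup routine further down in qla_dfs.c).
+ */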
+
+static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+	scsi_qla_host_t *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+	struct gid_list_info *gid_list;
+	dma_addr_t gid_list_dma;
+	fc_port_t fc_port;
+	char *id_iter;
+	int rc, i;
+	uint16_t entries, loop_id;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+	seq_printf(s, "%s\n", vha->host_str);
+	if (tgt) {
+		gid_list = dma_alloc_coherent(&ha->pdev->dev,
+		    qla2x00_gid_list_size(ha),
+		    &gid_list_dma, GFP_KERNEL);
+		if (!gid_list) {
+			ql_dbg(ql_dbg_user, vha, 0x7018,
+			    "DMA allocation failed for %u\n",
+			     qla2x00_gid_list_size(ha));
+			return 0;
+		}
+
+		rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+		    &entries);
+		if (rc != QLA_SUCCESS)
+			goto out_free_id_list;
+
+		id_iter = (char *)gid_list;
+
+		seq_puts(s, "Port Name	Port ID 	Loop ID\n");
+
+		for (i = 0; i < entries; i++) {
+			struct gid_list_info *gid =
+			    (struct gid_list_info *)id_iter;
+			loop_id = le16_to_cpu(gid->loop_id);
+			memset(&fc_port, 0, sizeof(fc_port_t));
+
+			fc_port.loop_id = loop_id;
+
+			rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+			seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
+				fc_port.port_name, fc_port.d_id.b.domain,
+				fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+				fc_port.loop_id);
+			id_iter += ha->gid_list_info_size;
+		}
+out_free_id_list:
+		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+		    gid_list, gid_list_dma);
+	}
+
+	return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+
+	return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+	.open		= qla2x00_dfs_tgt_port_database_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int
+qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+{
+	struct scsi_qla_host *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+
+	seq_puts(s, "FW Resource count\n\n");
+	seq_printf(s, "Original TGT exchg count[%d]\n",
+	    ha->orig_fw_tgt_xcb_count);
+	seq_printf(s, "Current TGT exchg count[%d]\n",
+	    ha->cur_fw_tgt_xcb_count);
+	seq_printf(s, "Original Initiator Exchange count[%d]\n",
+	    ha->orig_fw_xcb_count);
+	seq_printf(s, "Current Initiator Exchange count[%d]\n",
+	    ha->cur_fw_xcb_count);
+	seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
+	seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
+	seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
+	seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+
+	return 0;
+}
+
+static int
+qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
+{
+	struct scsi_qla_host *vha = inode->i_private;
+	return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
+}
+
+static const struct file_operations dfs_fw_resource_cnt_ops = {
+	.open           = qla_dfs_fw_resource_cnt_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int
+qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
+{
+	struct scsi_qla_host *vha = s->private;
+	struct qla_qpair *qpair = vha->hw->base_qpair;
+	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
+		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
+		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
+	u16 i;
+
+	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
+	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
+	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
+	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
+	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
+	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
+	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
+	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
+	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;
+
+	for (i = 0; i < vha->hw->max_qpairs; i++) {
+		qpair = vha->hw->queue_pair_map[i];
+		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
+		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
+		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
+		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
+		qla_core_ret_sta_ctio +=
+		    qpair->tgt_counters.qla_core_ret_sta_ctio;
+		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
+		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
+		num_alloc_iocb_failed +=
+		    qpair->tgt_counters.num_alloc_iocb_failed;
+		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
+	}
+
+	seq_puts(s, "Target Counters\n");
+	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
+		qla_core_sbt_cmd);
+	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
+		qla_core_ret_sta_ctio);
+	seq_printf(s, "qla_core_ret_ctio = %lld\n",
+		qla_core_ret_ctio);
+	seq_printf(s, "core_qla_que_buf = %lld\n",
+		core_qla_que_buf);
+	seq_printf(s, "core_qla_snd_status = %lld\n",
+		core_qla_snd_status);
+	seq_printf(s, "core_qla_free_cmd = %lld\n",
+		core_qla_free_cmd);
+	seq_printf(s, "num alloc iocb failed = %lld\n",
+		num_alloc_iocb_failed);
+	seq_printf(s, "num term exchange sent = %lld\n",
+		num_term_xchg_sent);
+	seq_printf(s, "num Q full sent = %lld\n",
+		num_q_full_sent);
+
+	/* DIF stats */
+	seq_printf(s, "DIF Inp Bytes = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_input_bytes);
+	seq_printf(s, "DIF Outp Bytes = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_output_bytes);
+	seq_printf(s, "DIF Inp Req = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_input_requests);
+	seq_printf(s, "DIF Outp Req = %lld\n",
+		vha->qla_stats.qla_dif_stats.dif_output_requests);
+	seq_printf(s, "DIF Guard err = %d\n",
+		vha->qla_stats.qla_dif_stats.dif_guard_err);
+	seq_printf(s, "DIF Ref tag err = %d\n",
+		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+	seq_printf(s, "DIF App tag err = %d\n",
+		vha->qla_stats.qla_dif_stats.dif_app_tag_err);
+	return 0;
+}
+
+static int
+qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
+{
+	struct scsi_qla_host *vha = inode->i_private;
+	return single_open(file, qla_dfs_tgt_counters_show, vha);
+}
+
+static const struct file_operations dfs_tgt_counters_ops = {
+	.open           = qla_dfs_tgt_counters_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int
+qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
+{
+	scsi_qla_host_t *vha = s->private;
+	uint32_t cnt;
+	uint32_t *fce;
+	uint64_t fce_start;
+	struct qla_hw_data *ha = vha->hw;
+
+	mutex_lock(&ha->fce_mutex);
+
+	seq_puts(s, "FCE Trace Buffer\n");
+	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
+	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
+	seq_puts(s, "FCE Enable Registers\n");
+	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
+	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
+	    ha->fce_mb[5], ha->fce_mb[6]);
+
+	fce = (uint32_t *) ha->fce;
+	fce_start = (unsigned long long) ha->fce_dma;
+	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
+		if (cnt % 8 == 0)
+			seq_printf(s, "\n%llx: ",
+			    (unsigned long long)((cnt * 4) + fce_start));
+		else
+			seq_putc(s, ' ');
+		seq_printf(s, "%08x", *fce++);
+	}
+
+	seq_puts(s, "\nEnd\n");
+
+	mutex_unlock(&ha->fce_mutex);
+
+	return 0;
+}
+
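+/*
+ * Opening the "fce" node pauses FCE tracing so the trace buffer stays
+ * stable while it is read; qla2x00_dfs_fce_release() below clears the
+ * buffer and re-arms tracing when the file is closed.
+ */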
+static int
+qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+
+	if (!ha->flags.fce_enabled)
+		goto out;
+
+	mutex_lock(&ha->fce_mutex);
+
+	/* Pause tracing to flush FCE buffers. */
+	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
+	if (rval)
+		ql_dbg(ql_dbg_user, vha, 0x705c,
+		    "DebugFS: Unable to disable FCE (%d).\n", rval);
+
+	ha->flags.fce_enabled = 0;
+
+	mutex_unlock(&ha->fce_mutex);
+out:
+	return single_open(file, qla2x00_dfs_fce_show, vha);
+}
+
+static int
+qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
+{
+	scsi_qla_host_t *vha = inode->i_private;
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+
+	if (ha->flags.fce_enabled)
+		goto out;
+
+	mutex_lock(&ha->fce_mutex);
+
+	/* Re-enable FCE tracing. */
+	ha->flags.fce_enabled = 1;
+	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
+	    ha->fce_mb, &ha->fce_bufs);
+	if (rval) {
+		ql_dbg(ql_dbg_user, vha, 0x700d,
+		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
+		ha->flags.fce_enabled = 0;
+	}
+
+	mutex_unlock(&ha->fce_mutex);
+out:
+	return single_release(inode, file);
+}
+
+static const struct file_operations dfs_fce_ops = {
+	.open		= qla2x00_dfs_fce_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= qla2x00_dfs_fce_release,
+};
+
+static int
+qla_dfs_naqp_show(struct seq_file *s, void *unused)
+{
+	struct scsi_qla_host *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+
+	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
+	return 0;
+}
+
+static int
+qla_dfs_naqp_open(struct inode *inode, struct file *file)
+{
+	struct scsi_qla_host *vha = inode->i_private;
+
+	return single_open(file, qla_dfs_naqp_show, vha);
+}
+
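+/*
+ * Writing a number to the "naqp" node sets how many queue pairs target
+ * mode may use. The value must be below max_qpairs; when it changes,
+ * qlt_clr_qp_table() forces the qpair mapping to be rebuilt.
+ */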
+static ssize_t
+qla_dfs_naqp_write(struct file *file, const char __user *buffer,
+    size_t count, loff_t *pos)
+{
+	struct seq_file *s = file->private_data;
+	struct scsi_qla_host *vha = s->private;
+	struct qla_hw_data *ha = vha->hw;
+	char *buf;
+	int rc = 0;
+	unsigned long num_act_qp;
+
+	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+		pr_err("host%ld: this adapter does not support Multi Q.",
+		    vha->host_no);
+		return -EINVAL;
+	}
+
+	if (!vha->flags.qpairs_available) {
+		pr_err("host%ld: Driver is not setup with Multi Q.",
+		    vha->host_no);
+		return -EINVAL;
+	}
+	buf = memdup_user_nul(buffer, count);
+	if (IS_ERR(buf)) {
+		pr_err("host%ld: fail to copy user buffer.",
+		    vha->host_no);
+		return PTR_ERR(buf);
+	}
+
+	num_act_qp = simple_strtoul(buf, NULL, 0);
+
+	if (num_act_qp >= vha->hw->max_qpairs) {
+		pr_err("User set invalid number of qpairs %lu. Max = %d",
+		    num_act_qp, vha->hw->max_qpairs);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (num_act_qp != ha->tgt.num_act_qpairs) {
+		ha->tgt.num_act_qpairs = num_act_qp;
+		qlt_clr_qp_table(vha);
+	}
+	rc = count;
+out_free:
+	kfree(buf);
+	return rc;
+}
+
+static const struct file_operations dfs_naqp_ops = {
+	.open		= qla_dfs_naqp_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= qla_dfs_naqp_write,
+};
+
+
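+/*
+ * Create the qla2xxx debugfs hierarchy: one driver-wide root directory
+ * (refcounted via qla2x00_dfs_root_count), one directory per host, and
+ * the per-host nodes above. Only ISPs with FCE support get any nodes.
+ */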
+int
+qla2x00_dfs_setup(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
+		goto out;
+	if (!ha->fce)
+		goto out;
+
+	if (qla2x00_dfs_root)
+		goto create_dir;
+
+	atomic_set(&qla2x00_dfs_root_count, 0);
+	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
+	if (!qla2x00_dfs_root) {
+		ql_log(ql_log_warn, vha, 0x00f7,
+		    "Unable to create debugfs root directory.\n");
+		goto out;
+	}
+
+create_dir:
+	if (ha->dfs_dir)
+		goto create_nodes;
+
+	mutex_init(&ha->fce_mutex);
+	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
+	if (!ha->dfs_dir) {
+		ql_log(ql_log_warn, vha, 0x00f8,
+		    "Unable to create debugfs ha directory.\n");
+		goto out;
+	}
+
+	atomic_inc(&qla2x00_dfs_root_count);
+
+create_nodes:
+	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
+	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
+	if (!ha->dfs_fw_resource_cnt) {
+		ql_log(ql_log_warn, vha, 0x00fd,
+		    "Unable to create debugFS fw_resource_count node.\n");
+		goto out;
+	}
+
+	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
+	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);
+	if (!ha->dfs_tgt_counters) {
+		ql_log(ql_log_warn, vha, 0xd301,
+		    "Unable to create debugFS tgt_counters node.\n");
+		goto out;
+	}
+
+	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+	    S_IRUSR,  ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+	if (!ha->tgt.dfs_tgt_port_database) {
+		ql_log(ql_log_warn, vha, 0xd03f,
+		    "Unable to create debugFS tgt_port_database node.\n");
+		goto out;
+	}
+
+	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
+	    &dfs_fce_ops);
+	if (!ha->dfs_fce) {
+		ql_log(ql_log_warn, vha, 0x00f9,
+		    "Unable to create debugfs fce node.\n");
+		goto out;
+	}
+
+	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+		S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+	if (!ha->tgt.dfs_tgt_sess) {
+		ql_log(ql_log_warn, vha, 0xd040,
+		    "Unable to create debugFS tgt_sess node.\n");
+		goto out;
+	}
+
+	if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
+		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+		if (!ha->tgt.dfs_naqp) {
+			ql_log(ql_log_warn, vha, 0xd011,
+			    "Unable to create debugFS naqp node.\n");
+			goto out;
+		}
+	}
+out:
+	return 0;
+}
+
+int
+qla2x00_dfs_remove(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->tgt.dfs_naqp) {
+		debugfs_remove(ha->tgt.dfs_naqp);
+		ha->tgt.dfs_naqp = NULL;
+	}
+
+	if (ha->tgt.dfs_tgt_sess) {
+		debugfs_remove(ha->tgt.dfs_tgt_sess);
+		ha->tgt.dfs_tgt_sess = NULL;
+	}
+
+	if (ha->tgt.dfs_tgt_port_database) {
+		debugfs_remove(ha->tgt.dfs_tgt_port_database);
+		ha->tgt.dfs_tgt_port_database = NULL;
+	}
+
+	if (ha->dfs_fw_resource_cnt) {
+		debugfs_remove(ha->dfs_fw_resource_cnt);
+		ha->dfs_fw_resource_cnt = NULL;
+	}
+
+	if (ha->dfs_tgt_counters) {
+		debugfs_remove(ha->dfs_tgt_counters);
+		ha->dfs_tgt_counters = NULL;
+	}
+
+	if (ha->dfs_fce) {
+		debugfs_remove(ha->dfs_fce);
+		ha->dfs_fce = NULL;
+	}
+
+	if (ha->dfs_dir) {
+		debugfs_remove(ha->dfs_dir);
+		ha->dfs_dir = NULL;
+		atomic_dec(&qla2x00_dfs_root_count);
+	}
+
+	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
+	    qla2x00_dfs_root) {
+		debugfs_remove(qla2x00_dfs_root);
+		qla2x00_dfs_root = NULL;
+	}
+
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_fw.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_fw.h
new file mode 100644
index 0000000..bec641a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_fw.h
@@ -0,0 +1,2081 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_FW_H
+#define __QLA_FW_H
+
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
+#define MBS_CHECKSUM_ERROR	0x4010
+#define MBS_INVALID_PRODUCT_KEY	0x4020
+
+/*
+ * Firmware Options.
+ */
+#define FO1_ENABLE_PUREX	BIT_10
+#define FO1_DISABLE_LED_CTRL	BIT_6
+#define FO1_ENABLE_8016		BIT_0
+#define FO2_ENABLE_SEL_CLASS2	BIT_5
+#define FO3_NO_ABTS_ON_LINKDOWN	BIT_14
+#define FO3_HOLD_STS_IOCB	BIT_12
+
+/*
+ * Port Database structure definition for ISP 24xx.
+ */
+#define PDO_FORCE_ADISC		BIT_1
+#define PDO_FORCE_PLOGI		BIT_0
+
+
+#define	PORT_DATABASE_24XX_SIZE		64
+struct port_database_24xx {
+	uint16_t flags;
+#define PDF_TASK_RETRY_ID	BIT_14
+#define PDF_FC_TAPE		BIT_7
+#define PDF_ACK0_CAPABLE	BIT_6
+#define PDF_FCP2_CONF		BIT_5
+#define PDF_CLASS_2		BIT_4
+#define PDF_HARD_ADDR		BIT_1
+
+	/*
+	 * For NVMe, the login_state field has been
+	 * split into nibbles.
+	 * The lower nibble is for FCP.
+	 * The upper nibble is for NVMe.
+	 */
+	uint8_t current_login_state;
+	uint8_t last_login_state;
+#define PDS_PLOGI_PENDING	0x03
+#define PDS_PLOGI_COMPLETE	0x04
+#define PDS_PRLI_PENDING	0x05
+#define PDS_PRLI_COMPLETE	0x06
+#define PDS_PORT_UNAVAILABLE	0x07
+#define PDS_PRLO_PENDING	0x09
+#define PDS_LOGO_PENDING	0x11
+#define PDS_PRLI2_PENDING	0x12
+
+	uint8_t hard_address[3];
+	uint8_t reserved_1;
+
+	uint8_t port_id[3];
+	uint8_t sequence_id;
+
+	uint16_t port_timer;
+
+	uint16_t nport_handle;			/* N_PORT handle. */
+
+	uint16_t receive_data_size;
+	uint16_t reserved_2;
+
+	uint8_t prli_svc_param_word_0[2];	/* Big endian */
+						/* Bits 15-0 of word 0 */
+	uint8_t prli_svc_param_word_3[2];	/* Big endian */
+						/* Bits 15-0 of word 3 */
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+
+	uint8_t reserved_3[4];
+	uint16_t prli_nvme_svc_param_word_0;	/* Bits 15-0 of word 0 */
+	uint16_t prli_nvme_svc_param_word_3;	/* Bits 15-0 of word 3 */
+	uint16_t nvme_first_burst_size;
+	uint8_t reserved_4[14];
+};
+
+/*
+ * MB 75h returns a list of DB entries similar to port_database_24xx (64B).
+ * However, in this case it returns only the first 40 bytes.
+ */
+struct get_name_list_extended {
+	__le16 flags;
+	u8 current_login_state;
+	u8 last_login_state;
+	u8 hard_address[3];
+	u8 reserved_1;
+	u8 port_id[3];
+	u8 sequence_id;
+	__le16 port_timer;
+	__le16 nport_handle;			/* N_PORT handle. */
+	__le16 receive_data_size;
+	__le16 reserved_2;
+
+	/* PRLI SVC Param are Big endian */
+	u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */
+	u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */
+	u8 port_name[WWN_SIZE];
+	u8 node_name[WWN_SIZE];
+};
+
+/* MB 75h: This is the short version of the database */
+struct get_name_list {
+	u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */
+	__le16 nport_handle;
+	u8 reserved;
+};
+
+struct vp_database_24xx {
+	uint16_t vp_status;
+	uint8_t  options;
+	uint8_t  id;
+	uint8_t  port_name[WWN_SIZE];
+	uint8_t  node_name[WWN_SIZE];
+	uint16_t port_id_low;
+	uint16_t port_id_high;
+};
+
+struct nvram_24xx {
+	/* NVRAM header. */
+	uint8_t id[4];
+	uint16_t nvram_version;
+	uint16_t reserved_0;
+
+	/* Firmware Initialization Control Block. */
+	uint16_t version;
+	uint16_t reserved_1;
+	__le16 frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+	uint16_t hard_address;
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+
+	uint16_t login_retry_count;
+	uint16_t link_down_on_nos;
+	uint16_t interrupt_delay_timer;
+	uint16_t login_timeout;
+
+	uint32_t firmware_options_1;
+	uint32_t firmware_options_2;
+	uint32_t firmware_options_3;
+
+	/* Offset 56. */
+
+	/*
+	 * BIT 0     = Control Enable
+	 * BIT 1-15  =
+	 *
+	 * BIT 0-7   = Reserved
+	 * BIT 8-10  = Output Swing 1G
+	 * BIT 11-13 = Output Emphasis 1G
+	 * BIT 14-15 = Reserved
+	 *
+	 * BIT 0-7   = Reserved
+	 * BIT 8-10  = Output Swing 2G
+	 * BIT 11-13 = Output Emphasis 2G
+	 * BIT 14-15 = Reserved
+	 *
+	 * BIT 0-7   = Reserved
+	 * BIT 8-10  = Output Swing 4G
+	 * BIT 11-13 = Output Emphasis 4G
+	 * BIT 14-15 = Reserved
+	 */
+	uint16_t seriallink_options[4];
+
+	uint16_t reserved_2[16];
+
+	/* Offset 96. */
+	uint16_t reserved_3[16];
+
+	/* PCIe table entries. */
+	uint16_t reserved_4[16];
+
+	/* Offset 160. */
+	uint16_t reserved_5[16];
+
+	/* Offset 192. */
+	uint16_t reserved_6[16];
+
+	/* Offset 224. */
+	uint16_t reserved_7[16];
+
+	/*
+	 * BIT 0  = Enable spinup delay
+	 * BIT 1  = Disable BIOS
+	 * BIT 2  = Enable Memory Map BIOS
+	 * BIT 3  = Enable Selectable Boot
+	 * BIT 4  = Disable RISC code load
+	 * BIT 5  = Disable Serdes
+	 * BIT 6  =
+	 * BIT 7  =
+	 *
+	 * BIT 8  =
+	 * BIT 9  =
+	 * BIT 10 = Enable lip full login
+	 * BIT 11 = Enable target reset
+	 * BIT 12 =
+	 * BIT 13 =
+	 * BIT 14 =
+	 * BIT 15 = Enable alternate WWN
+	 *
+	 * BIT 16-31 =
+	 */
+	uint32_t host_p;
+
+	uint8_t alternate_port_name[WWN_SIZE];
+	uint8_t alternate_node_name[WWN_SIZE];
+
+	uint8_t boot_port_name[WWN_SIZE];
+	uint16_t boot_lun_number;
+	uint16_t reserved_8;
+
+	uint8_t alt1_boot_port_name[WWN_SIZE];
+	uint16_t alt1_boot_lun_number;
+	uint16_t reserved_9;
+
+	uint8_t alt2_boot_port_name[WWN_SIZE];
+	uint16_t alt2_boot_lun_number;
+	uint16_t reserved_10;
+
+	uint8_t alt3_boot_port_name[WWN_SIZE];
+	uint16_t alt3_boot_lun_number;
+	uint16_t reserved_11;
+
+	/*
+	 * BIT 0 = Selective Login
+	 * BIT 1 = Alt-Boot Enable
+	 * BIT 2 = Reserved
+	 * BIT 3 = Boot Order List
+	 * BIT 4 = Reserved
+	 * BIT 5 = Selective LUN
+	 * BIT 6 = Reserved
+	 * BIT 7-31 =
+	 */
+	uint32_t efi_parameters;
+
+	uint8_t reset_delay;
+	uint8_t reserved_12;
+	uint16_t reserved_13;
+
+	uint16_t boot_id_number;
+	uint16_t reserved_14;
+
+	uint16_t max_luns_per_target;
+	uint16_t reserved_15;
+
+	uint16_t port_down_retry_count;
+	uint16_t link_down_timeout;
+
+	/* FCode parameters. */
+	uint16_t fcode_parameter;
+
+	uint16_t reserved_16[3];
+
+	/* Offset 352. */
+	uint8_t prev_drv_ver_major;
+	uint8_t prev_drv_ver_submajob;
+	uint8_t prev_drv_ver_minor;
+	uint8_t prev_drv_ver_subminor;
+
+	uint16_t prev_bios_ver_major;
+	uint16_t prev_bios_ver_minor;
+
+	uint16_t prev_efi_ver_major;
+	uint16_t prev_efi_ver_minor;
+
+	uint16_t prev_fw_ver_major;
+	uint8_t prev_fw_ver_minor;
+	uint8_t prev_fw_ver_subminor;
+
+	uint16_t reserved_17[8];
+
+	/* Offset 384. */
+	uint16_t reserved_18[16];
+
+	/* Offset 416. */
+	uint16_t reserved_19[16];
+
+	/* Offset 448. */
+	uint16_t reserved_20[16];
+
+	/* Offset 480. */
+	uint8_t model_name[16];
+
+	uint16_t reserved_21[2];
+
+	/* Offset 500. */
+	/* HW Parameter Block. */
+	uint16_t pcie_table_sig;
+	uint16_t pcie_table_offset;
+
+	uint16_t subsystem_vendor_id;
+	uint16_t subsystem_device_id;
+
+	uint32_t checksum;
+};
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define	ICB_VERSION 1
+struct init_cb_24xx {
+	uint16_t version;
+	uint16_t reserved_1;
+
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+
+	uint16_t hard_address;
+
+	uint8_t port_name[WWN_SIZE];		/* Big endian. */
+	uint8_t node_name[WWN_SIZE];		/* Big endian. */
+
+	uint16_t response_q_inpointer;
+	uint16_t request_q_outpointer;
+
+	uint16_t login_retry_count;
+
+	uint16_t prio_request_q_outpointer;
+
+	uint16_t response_q_length;
+	uint16_t request_q_length;
+
+	uint16_t link_down_on_nos;		/* Milliseconds. */
+
+	uint16_t prio_request_q_length;
+
+	uint32_t request_q_address[2];
+	uint32_t response_q_address[2];
+	uint32_t prio_request_q_address[2];
+
+	uint16_t msix;
+	uint16_t msix_atio;
+	uint8_t reserved_2[4];
+
+	uint16_t atio_q_inpointer;
+	uint16_t atio_q_length;
+	uint32_t atio_q_address[2];
+
+	uint16_t interrupt_delay_timer;		/* 100us increments. */
+	uint16_t login_timeout;
+
+	/*
+	 * BIT 0  = Enable Hard Loop Id
+	 * BIT 1  = Enable Fairness
+	 * BIT 2  = Enable Full-Duplex
+	 * BIT 3  = Reserved
+	 * BIT 4  = Enable Target Mode
+	 * BIT 5  = Disable Initiator Mode
+	 * BIT 6  = Acquire FA-WWN
+	 * BIT 7  = Enable D-port Diagnostics
+	 *
+	 * BIT 8  = Reserved
+	 * BIT 9  = Non Participating LIP
+	 * BIT 10 = Descending Loop ID Search
+	 * BIT 11 = Acquire Loop ID in LIPA
+	 * BIT 12 = Reserved
+	 * BIT 13 = Full Login after LIP
+	 * BIT 14 = Node Name Option
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_1;
+
+	/*
+	 * BIT 0  = Operation Mode bit 0
+	 * BIT 1  = Operation Mode bit 1
+	 * BIT 2  = Operation Mode bit 2
+	 * BIT 3  = Operation Mode bit 3
+	 * BIT 4  = Connection Options bit 0
+	 * BIT 5  = Connection Options bit 1
+	 * BIT 6  = Connection Options bit 2
+	 * BIT 7  = Enable Non part on LIHA failure
+	 *
+	 * BIT 8  = Enable Class 2
+	 * BIT 9  = Enable ACK0
+	 * BIT 10 = Reserved
+	 * BIT 11 = Enable FC-SP Security
+	 * BIT 12 = FC Tape Enable
+	 * BIT 13 = Reserved
+	 * BIT 14 = Enable Target PRLI Control
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_2;
+
+	/*
+	 * BIT 0  = Reserved
+	 * BIT 1  = Soft ID only
+	 * BIT 2  = Reserved
+	 * BIT 3  = Reserved
+	 * BIT 4  = FCP RSP Payload bit 0
+	 * BIT 5  = FCP RSP Payload bit 1
+	 * BIT 6  = Enable Receive Out-of-Order data frame handling
+	 * BIT 7  = Disable Automatic PLOGI on Local Loop
+	 *
+	 * BIT 8  = Reserved
+	 * BIT 9  = Enable Out-of-Order FCP_XFER_RDY relative offset handling
+	 * BIT 10 = Reserved
+	 * BIT 11 = Reserved
+	 * BIT 12 = Reserved
+	 * BIT 13 = Data Rate bit 0
+	 * BIT 14 = Data Rate bit 1
+	 * BIT 15 = Data Rate bit 2
+	 * BIT 16 = Enable 75 ohm Termination Select
+	 * BIT 17-28 = Reserved
+	 * BIT 29 = Enable response queue 0 in index shadowing
+	 * BIT 30 = Enable request queue 0 out index shadowing
+	 * BIT 31 = Reserved
+	 */
+	uint32_t firmware_options_3;
+	uint16_t qos;
+	uint16_t rid;
+	uint8_t  reserved_3[20];
+};
+
+/*
+ * ISP queue - command entry structure definition.
+ */
+#define COMMAND_BIDIRECTIONAL 0x75
+struct cmd_bidir {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined */
+	uint8_t entry_status;		/* Entry status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t timeout;		/* Command timeout. */
+
+	uint16_t wr_dseg_count;		/* Write Data segment count. */
+	uint16_t rd_dseg_count;		/* Read Data segment count. */
+
+	struct scsi_lun lun;		/* FCP LUN (BE). */
+
+	uint16_t control_flags;		/* Control flags. */
+#define BD_WRAP_BACK			BIT_3
+#define BD_READ_DATA			BIT_1
+#define BD_WRITE_DATA			BIT_0
+
+	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
+	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
+
+	uint16_t reserved[2];			/* Reserved */
+
+	uint32_t rd_byte_count;			/* Total Byte count Read. */
+	uint32_t wr_byte_count;			/* Total Byte count write. */
+
+	uint8_t port_id[3];			/* PortID of destination port.*/
+	uint8_t vp_index;
+
+	uint32_t fcp_data_dseg_address[2];	/* Data segment address. */
+	uint16_t fcp_data_dseg_len;		/* Data segment length. */
+};
+
+#define COMMAND_TYPE_6	0x48		/* Command Type 6 entry */
+struct cmd_type_6 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
+
+	uint16_t dseg_count;		/* Data segment count. */
+
+	uint16_t fcp_rsp_dsd_len;	/* FCP_RSP DSD length. */
+
+	struct scsi_lun lun;		/* FCP LUN (BE). */
+
+	uint16_t control_flags;		/* Control flags. */
+#define CF_DIF_SEG_DESCR_ENABLE		BIT_3
+#define CF_DATA_SEG_DESCR_ENABLE	BIT_2
+#define CF_READ_DATA			BIT_1
+#define CF_WRITE_DATA			BIT_0
+
+	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
+	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
+
+	uint32_t fcp_rsp_dseg_address[2];	/* Data segment address. */
+
+	uint32_t byte_count;		/* Total byte count. */
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint32_t fcp_data_dseg_address[2];	/* Data segment address. */
+	uint32_t fcp_data_dseg_len;		/* Data segment length. */
+};
+
+#define COMMAND_TYPE_7	0x18		/* Command Type 7 entry */
+struct cmd_type_7 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
+#define FW_MAX_TIMEOUT		0x1999
+
+	uint16_t dseg_count;		/* Data segment count. */
+	uint16_t reserved_1;
+
+	struct scsi_lun lun;		/* FCP LUN (BE). */
+
+	uint16_t task_mgmt_flags;	/* Task management flags. */
+#define TMF_CLEAR_ACA		BIT_14
+#define TMF_TARGET_RESET	BIT_13
+#define TMF_LUN_RESET		BIT_12
+#define TMF_CLEAR_TASK_SET	BIT_10
+#define TMF_ABORT_TASK_SET	BIT_9
+#define TMF_DSD_LIST_ENABLE	BIT_2
+#define TMF_READ_DATA		BIT_1
+#define TMF_WRITE_DATA		BIT_0
+
+	uint8_t task;
+#define TSK_SIMPLE		0
+#define TSK_HEAD_OF_QUEUE	1
+#define TSK_ORDERED		2
+#define TSK_ACA			4
+#define TSK_UNTAGGED		5
+
+	uint8_t crn;
+
+	uint8_t fcp_cdb[MAX_CMDSZ];	/* SCSI command words. */
+	uint32_t byte_count;		/* Total byte count. */
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_0_len;		/* Data segment 0 length. */
+};
+
+#define COMMAND_TYPE_CRC_2	0x6A	/* Command Type CRC_2 (Type 6)
+					 * (T10-DIF) */
+struct cmd_type_crc_2 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
+
+	uint16_t dseg_count;		/* Data segment count. */
+
+	uint16_t fcp_rsp_dseg_len;	/* FCP_RSP DSD length. */
+
+	struct scsi_lun lun;		/* FCP LUN (BE). */
+
+	uint16_t control_flags;		/* Control flags. */
+
+	uint16_t fcp_cmnd_dseg_len;		/* Data segment length. */
+	uint32_t fcp_cmnd_dseg_address[2];	/* Data segment address. */
+
+	uint32_t fcp_rsp_dseg_address[2];	/* Data segment address. */
+
+	uint32_t byte_count;		/* Total byte count. */
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint32_t crc_context_address[2];	/* Data segment address. */
+	uint16_t crc_context_len;		/* Data segment length. */
+	uint16_t reserved_1;			/* MUST be set to 0. */
+};
+
+
+/*
+ * ISP queue - status entry structure definition.
+ */
+#define	STATUS_TYPE	0x03		/* Status entry. */
+struct sts_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t comp_status;		/* Completion status. */
+	uint16_t ox_id;			/* OX_ID used by the firmware. */
+
+	uint32_t residual_len;		/* FW calc residual transfer length. */
+
+	union {
+		uint16_t reserved_1;
+		uint16_t nvme_rsp_pyld_len;
+	};
+
+	uint16_t state_flags;		/* State flags. */
+#define SF_TRANSFERRED_DATA	BIT_11
+#define SF_NVME_ERSP            BIT_6
+#define SF_FCP_RSP_DMA		BIT_0
+
+	uint16_t retry_delay;
+	uint16_t scsi_status;		/* SCSI status. */
+#define SS_CONFIRMATION_REQ		BIT_12
+
+	uint32_t rsp_residual_count;	/* FCP RSP residual count. */
+
+	uint32_t sense_len;		/* FCP SENSE length. */
+
+	union {
+		struct {
+			uint32_t rsp_data_len;	/* FCP response data length  */
+			uint8_t data[28];	/* FCP rsp/sense information */
+		};
+		struct nvme_fc_ersp_iu nvme_ersp;
+		uint8_t nvme_ersp_data[32];
+	};
+
+	/*
+	 * If DIF Error is set in comp_status, these additional fields are
+	 * defined:
+	 *
+	 * !!! NOTE: Firmware sends expected/actual DIF data in big endian
+	 * format; but all of the "data" field gets swab32-d in the beginning
+	 * of qla2x00_status_entry().
+	 *
+	 * &data[10] : uint8_t report_runt_bg[2];	- computed guard
+	 * &data[12] : uint8_t actual_dif[8];		- DIF Data received
+	 * &data[20] : uint8_t expected_dif[8];		- DIF Data computed
+	 */
+};
+
+
+/*
+ * Status entry completion status
+ */
+#define CS_DATA_REASSEMBLY_ERROR 0x11	/* Data Reassembly Error. */
+#define CS_ABTS_BY_TARGET	0x13	/* Target send ABTS to abort IOCB. */
+#define CS_FW_RESOURCE		0x2C	/* Firmware Resource Unavailable. */
+#define CS_TASK_MGMT_OVERRUN	0x30	/* Task management overrun (8+). */
+#define CS_ABORT_BY_TARGET	0x47	/* Abort By Target. */
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+#define MARKER_TYPE	0x04		/* Marker entry. */
+struct mrk_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint8_t modifier;		/* Modifier (7-0). */
+#define MK_SYNC_ID_LUN	0		/* Synchronize ID/LUN */
+#define MK_SYNC_ID	1		/* Synchronize ID */
+#define MK_SYNC_ALL	2		/* Synchronize all ID/LUN */
+	uint8_t reserved_1;
+
+	uint8_t reserved_2;
+	uint8_t vp_index;
+
+	uint16_t reserved_3;
+
+	uint8_t lun[8];			/* FCP LUN (BE). */
+	uint8_t reserved_4[40];
+};
+
+/*
+ * ISP queue - CT Pass-Through entry structure definition.
+ */
+#define CT_IOCB_TYPE		0x29	/* CT Pass-Through IOCB entry */
+struct ct_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t comp_status;		/* Completion status. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t cmd_dsd_count;
+
+	uint8_t vp_index;
+	uint8_t reserved_1;
+
+	uint16_t timeout;		/* Command timeout. */
+	uint16_t reserved_2;
+
+	uint16_t rsp_dsd_count;
+
+	uint8_t reserved_3[10];
+
+	uint32_t rsp_byte_count;
+	uint32_t cmd_byte_count;
+
+	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_0_len;		/* Data segment 0 length. */
+	uint32_t dseg_1_address[2];	/* Data segment 1 address. */
+	uint32_t dseg_1_len;		/* Data segment 1 length. */
+};
+
+/*
+ * ISP queue - ELS Pass-Through entry structure definition.
+ */
+#define ELS_IOCB_TYPE		0x53	/* ELS Pass-Through IOCB entry */
+struct els_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t reserved_1;
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t tx_dsd_count;
+
+	uint8_t vp_index;
+	uint8_t sof_type;
+#define EST_SOFI3		(1 << 4)
+#define EST_SOFI2		(3 << 4)
+
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
+	uint16_t rx_dsd_count;
+
+	uint8_t opcode;
+	uint8_t reserved_2;
+
+	uint8_t port_id[3];
+	uint8_t reserved_3;
+
+	uint16_t reserved_4;
+
+	uint16_t control_flags;		/* Control flags. */
+#define ECF_PAYLOAD_DESCR_MASK	(BIT_15|BIT_14|BIT_13)
+#define EPD_ELS_COMMAND		(0 << 13)
+#define EPD_ELS_ACC		(1 << 13)
+#define EPD_ELS_RJT		(2 << 13)
+#define EPD_RX_XCHG		(3 << 13)
+#define ECF_CLR_PASSTHRU_PEND	BIT_12
+#define ECF_INCL_FRAME_HDR	BIT_11
+
+	uint32_t rx_byte_count;
+	uint32_t tx_byte_count;
+
+	uint32_t tx_address[2];		/* Data segment 0 address. */
+	uint32_t tx_len;		/* Data segment 0 length. */
+	uint32_t rx_address[2];		/* Data segment 1 address. */
+	uint32_t rx_len;		/* Data segment 1 length. */
+};
+
+struct els_sts_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t comp_status;
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t reserved_1;
+
+	uint8_t vp_index;
+	uint8_t sof_type;
+
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
+	uint16_t reserved_2;
+
+	uint8_t opcode;
+	uint8_t reserved_3;
+
+	uint8_t port_id[3];
+	uint8_t reserved_4;
+
+	uint16_t reserved_5;
+
+	uint16_t control_flags;		/* Control flags. */
+	uint32_t total_byte_count;
+	uint32_t error_subcode_1;
+	uint32_t error_subcode_2;
+};
+
+/*
+ * ISP queue - Mailbox Command entry structure definition.
+ */
+#define MBX_IOCB_TYPE	0x39
+struct mbx_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t mbx[28];
+};
+
+
+#define LOGINOUT_PORT_IOCB_TYPE	0x52	/* Login/Logout Port entry. */
+struct logio_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t comp_status;		/* Completion status. */
+#define CS_LOGIO_ERROR		0x31	/* Login/Logout IOCB error. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t control_flags;		/* Control flags. */
+					/* Modifiers. */
+#define LCF_INCLUDE_SNS		BIT_10	/* Include SNS (FFFFFC) during LOGO. */
+#define LCF_FCP2_OVERRIDE	BIT_9	/* Set/Reset word 3 of PRLI. */
+#define LCF_CLASS_2		BIT_8	/* Enable class 2 during PLOGI. */
+#define LCF_FREE_NPORT		BIT_7	/* Release NPORT handle after LOGO. */
+#define LCF_EXPL_LOGO		BIT_6	/* Perform an explicit LOGO. */
+#define LCF_NVME_PRLI		BIT_6   /* Perform NVME FC4 PRLI */
+#define LCF_SKIP_PRLI		BIT_5	/* Skip PRLI after PLOGI. */
+#define LCF_IMPL_LOGO_ALL	BIT_5	/* Implicit LOGO to all ports. */
+#define LCF_COND_PLOGI		BIT_4	/* PLOGI only if not logged-in. */
+#define LCF_IMPL_LOGO		BIT_4	/* Perform an implicit LOGO. */
+#define LCF_IMPL_PRLO		BIT_4	/* Perform an implicit PRLO. */
+					/* Commands. */
+#define LCF_COMMAND_PLOGI	0x00	/* PLOGI. */
+#define LCF_COMMAND_PRLI	0x01	/* PRLI. */
+#define LCF_COMMAND_PDISC	0x02	/* PDISC. */
+#define LCF_COMMAND_ADISC	0x03	/* ADISC. */
+#define LCF_COMMAND_LOGO	0x08	/* LOGO. */
+#define LCF_COMMAND_PRLO	0x09	/* PRLO. */
+#define LCF_COMMAND_TPRLO	0x0A	/* TPRLO. */
+
+	uint8_t vp_index;
+	uint8_t reserved_1;
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+
+	uint8_t rsp_size;		/* Response size in 32bit words. */
+
+	uint32_t io_parameter[11];	/* General I/O parameters. */
+#define LSC_SCODE_NOLINK	0x01
+#define LSC_SCODE_NOIOCB	0x02
+#define LSC_SCODE_NOXCB		0x03
+#define LSC_SCODE_CMD_FAILED	0x04
+#define LSC_SCODE_NOFABRIC	0x05
+#define LSC_SCODE_FW_NOT_READY	0x07
+#define LSC_SCODE_NOT_LOGGED_IN	0x09
+#define LSC_SCODE_NOPCB		0x0A
+
+#define LSC_SCODE_ELS_REJECT	0x18
+#define LSC_SCODE_CMD_PARAM_ERR	0x19
+#define LSC_SCODE_PORTID_USED	0x1A
+#define LSC_SCODE_NPORT_USED	0x1B
+#define LSC_SCODE_NONPORT	0x1C
+#define LSC_SCODE_LOGGED_IN	0x1D
+#define LSC_SCODE_NOFLOGI_ACC	0x1F
+};
+
+#define TSK_MGMT_IOCB_TYPE	0x14
+struct tsk_mgmt_entry {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+
+	uint16_t reserved_1;
+
+	uint16_t delay;			/* Activity delay in seconds. */
+
+	uint16_t timeout;		/* Command timeout. */
+
+	struct scsi_lun lun;		/* FCP LUN (BE). */
+
+	uint32_t control_flags;		/* Control Flags. */
+#define TCF_NOTMCMD_TO_TARGET	BIT_31
+#define TCF_LUN_RESET		BIT_4
+#define TCF_ABORT_TASK_SET	BIT_3
+#define TCF_CLEAR_TASK_SET	BIT_2
+#define TCF_TARGET_RESET	BIT_1
+#define TCF_CLEAR_ACA		BIT_0
+
+	uint8_t reserved_2[20];
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint8_t reserved_3[12];
+};
+
+#define ABORT_IOCB_TYPE	0x33
+struct abort_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+					/* or Completion status. */
+
+	uint16_t options;		/* Options. */
+#define AOF_NO_ABTS		BIT_0	/* Do not send any ABTS. */
+
+	uint32_t handle_to_abort;	/* System handle to abort. */
+
+	uint16_t req_que_no;
+	uint8_t reserved_1[30];
+
+	uint8_t port_id[3];		/* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint8_t reserved_2[12];
+};
+
+/*
+ * ISP I/O Register Set structure definitions.
+ */
+struct device_reg_24xx {
+	uint32_t flash_addr;		/* Flash/NVRAM BIOS address. */
+#define FARX_DATA_FLAG	BIT_31
+#define FARX_ACCESS_FLASH_CONF	0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA	0x7FF00000
+#define FARX_ACCESS_NVRAM_CONF	0x7FFF0000
+#define FARX_ACCESS_NVRAM_DATA	0x7FFE0000
+
+#define FA_NVRAM_FUNC0_ADDR	0x80
+#define FA_NVRAM_FUNC1_ADDR	0x180
+
+#define FA_NVRAM_VPD_SIZE	0x200
+#define FA_NVRAM_VPD0_ADDR	0x00
+#define FA_NVRAM_VPD1_ADDR	0x100
+
+#define FA_BOOT_CODE_ADDR	0x00000
+					/*
+					 * RISC code begins at offset 512KB
+					 * within flash, consisting of two
+					 * contiguous RISC code segments.
+					 */
+#define FA_RISC_CODE_ADDR	0x20000
+#define FA_RISC_CODE_SEGMENTS	2
+
+#define FA_FLASH_DESCR_ADDR_24	0x11000
+#define FA_FLASH_LAYOUT_ADDR_24	0x11400
+#define FA_NPIV_CONF0_ADDR_24	0x16000
+#define FA_NPIV_CONF1_ADDR_24	0x17000
+
+#define FA_FW_AREA_ADDR		0x40000
+#define FA_VPD_NVRAM_ADDR	0x48000
+#define FA_FEATURE_ADDR		0x4C000
+#define FA_FLASH_DESCR_ADDR	0x50000
+#define FA_FLASH_LAYOUT_ADDR	0x50400
+#define FA_HW_EVENT0_ADDR	0x54000
+#define FA_HW_EVENT1_ADDR	0x54400
+#define FA_HW_EVENT_SIZE	0x200
+#define FA_HW_EVENT_ENTRY_SIZE	4
+#define FA_NPIV_CONF0_ADDR	0x5C000
+#define FA_NPIV_CONF1_ADDR	0x5D000
+#define FA_FCP_PRIO0_ADDR	0x10000
+#define FA_FCP_PRIO1_ADDR	0x12000
+
+/*
+ * Flash Error Log Event Codes.
+ */
+#define HW_EVENT_RESET_ERR	0xF00B
+#define HW_EVENT_ISP_ERR	0xF020
+#define HW_EVENT_PARITY_ERR	0xF022
+#define HW_EVENT_NVRAM_CHKSUM_ERR	0xF023
+#define HW_EVENT_FLASH_FW_ERR	0xF024
+
+	uint32_t flash_data;		/* Flash/NVRAM BIOS data. */
+
+	uint32_t ctrl_status;		/* Control/Status. */
+#define CSRX_FLASH_ACCESS_ERROR	BIT_18	/* Flash/NVRAM Access Error. */
+#define CSRX_DMA_ACTIVE		BIT_17	/* DMA Active status. */
+#define CSRX_DMA_SHUTDOWN	BIT_16	/* DMA Shutdown control status. */
+#define CSRX_FUNCTION		BIT_15	/* Function number. */
+					/* PCI-X Bus Mode. */
+#define CSRX_PCIX_BUS_MODE_MASK	(BIT_11|BIT_10|BIT_9|BIT_8)
+#define PBM_PCI_33MHZ		(0 << 8)
+#define PBM_PCIX_M1_66MHZ	(1 << 8)
+#define PBM_PCIX_M1_100MHZ	(2 << 8)
+#define PBM_PCIX_M1_133MHZ	(3 << 8)
+#define PBM_PCIX_M2_66MHZ	(5 << 8)
+#define PBM_PCIX_M2_100MHZ	(6 << 8)
+#define PBM_PCIX_M2_133MHZ	(7 << 8)
+#define PBM_PCI_66MHZ		(8 << 8)
+					/* Max Write Burst byte count. */
+#define CSRX_MAX_WRT_BURST_MASK	(BIT_5|BIT_4)
+#define MWB_512_BYTES		(0 << 4)
+#define MWB_1024_BYTES		(1 << 4)
+#define MWB_2048_BYTES		(2 << 4)
+#define MWB_4096_BYTES		(3 << 4)
+
+#define CSRX_64BIT_SLOT		BIT_2	/* PCI 64-Bit Bus Slot. */
+#define CSRX_FLASH_ENABLE	BIT_1	/* Flash BIOS Read/Write enable. */
+#define CSRX_ISP_SOFT_RESET	BIT_0	/* ISP soft reset. */
+
+	uint32_t ictrl;			/* Interrupt control. */
+#define ICRX_EN_RISC_INT	BIT_3	/* Enable RISC interrupts on PCI. */
+
+	uint32_t istatus;		/* Interrupt status. */
+#define ISRX_RISC_INT		BIT_3	/* RISC interrupt. */
+
+	uint32_t unused_1[2];		/* Gap. */
+
+					/* Request Queue. */
+	uint32_t req_q_in;		/*  In-Pointer. */
+	uint32_t req_q_out;		/*  Out-Pointer. */
+					/* Response Queue. */
+	uint32_t rsp_q_in;		/*  In-Pointer. */
+	uint32_t rsp_q_out;		/*  Out-Pointer. */
+					/* Priority Request Queue. */
+	uint32_t preq_q_in;		/*  In-Pointer. */
+	uint32_t preq_q_out;		/*  Out-Pointer. */
+
+	uint32_t unused_2[2];		/* Gap. */
+
+					/* ATIO Queue. */
+	uint32_t atio_q_in;		/*  In-Pointer. */
+	uint32_t atio_q_out;		/*  Out-Pointer. */
+
+	uint32_t host_status;
+#define HSRX_RISC_INT		BIT_15	/* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED	BIT_8	/* RISC Paused. */
+
+	uint32_t hccr;			/* Host command & control register. */
+					/* HCCR statuses. */
+#define HCCRX_HOST_INT		BIT_6	/* Host to RISC interrupt bit. */
+#define HCCRX_RISC_RESET	BIT_5	/* RISC Reset mode bit. */
+					/* HCCR commands. */
+					/* NOOP. */
+#define HCCRX_NOOP		0x00000000
+					/* Set RISC Reset. */
+#define HCCRX_SET_RISC_RESET	0x10000000
+					/* Clear RISC Reset. */
+#define HCCRX_CLR_RISC_RESET	0x20000000
+					/* Set RISC Pause. */
+#define HCCRX_SET_RISC_PAUSE	0x30000000
+					/* Releases RISC Pause. */
+#define HCCRX_REL_RISC_PAUSE	0x40000000
+					/* Set HOST to RISC interrupt. */
+#define HCCRX_SET_HOST_INT	0x50000000
+					/* Clear HOST to RISC interrupt. */
+#define HCCRX_CLR_HOST_INT	0x60000000
+					/* Clear RISC to PCI interrupt. */
+#define HCCRX_CLR_RISC_INT	0xA0000000
+
+	uint32_t gpiod;			/* GPIO Data register. */
+
+					/* LED update mask. */
+#define GPDX_LED_UPDATE_MASK	(BIT_20|BIT_19|BIT_18)
+					/* Data update mask. */
+#define GPDX_DATA_UPDATE_MASK	(BIT_17|BIT_16)
+					/* Data update mask. */
+#define GPDX_DATA_UPDATE_2_MASK	(BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
+					/* LED control mask. */
+#define GPDX_LED_COLOR_MASK	(BIT_4|BIT_3|BIT_2)
+					/* LED bit values. Color names as
+					 * referenced in fw spec.
+					 */
+#define GPDX_LED_YELLOW_ON	BIT_2
+#define GPDX_LED_GREEN_ON	BIT_3
+#define GPDX_LED_AMBER_ON	BIT_4
+					/* Data in/out. */
+#define GPDX_DATA_INOUT		(BIT_1|BIT_0)
+
+	uint32_t gpioe;			/* GPIO Enable register. */
+					/* Enable update mask. */
+#define GPEX_ENABLE_UPDATE_MASK	(BIT_17|BIT_16)
+					/* Enable update mask. */
+#define GPEX_ENABLE_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
+					/* Enable. */
+#define GPEX_ENABLE		(BIT_1|BIT_0)
+
+	uint32_t iobase_addr;		/* I/O Bus Base Address register. */
+
+	uint32_t unused_3[10];		/* Gap. */
+
+	uint16_t mailbox0;
+	uint16_t mailbox1;
+	uint16_t mailbox2;
+	uint16_t mailbox3;
+	uint16_t mailbox4;
+	uint16_t mailbox5;
+	uint16_t mailbox6;
+	uint16_t mailbox7;
+	uint16_t mailbox8;
+	uint16_t mailbox9;
+	uint16_t mailbox10;
+	uint16_t mailbox11;
+	uint16_t mailbox12;
+	uint16_t mailbox13;
+	uint16_t mailbox14;
+	uint16_t mailbox15;
+	uint16_t mailbox16;
+	uint16_t mailbox17;
+	uint16_t mailbox18;
+	uint16_t mailbox19;
+	uint16_t mailbox20;
+	uint16_t mailbox21;
+	uint16_t mailbox22;
+	uint16_t mailbox23;
+	uint16_t mailbox24;
+	uint16_t mailbox25;
+	uint16_t mailbox26;
+	uint16_t mailbox27;
+	uint16_t mailbox28;
+	uint16_t mailbox29;
+	uint16_t mailbox30;
+	uint16_t mailbox31;
+
+	uint32_t iobase_window;
+	uint32_t iobase_c4;
+	uint32_t iobase_c8;
+	uint32_t unused_4_1[6];		/* Gap. */
+	uint32_t iobase_q;
+	uint32_t unused_5[2];		/* Gap. */
+	uint32_t iobase_select;
+	uint32_t unused_6[2];		/* Gap. */
+	uint32_t iobase_sdata;
+};
+/* RISC-RISC semaphore register PCI offset */
+#define RISC_REGISTER_BASE_OFFSET	0x7010
+#define RISC_REGISTER_WINDOW_OFFET	0x6
+
+/* RISC-RISC semaphore/flag register (risc address 0x7016) */
+
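+/*
+ * Note: the upper halfword of a write appears to act as a write-enable
+ * mask for the corresponding bits of the lower halfword (the same
+ * update-mask pattern as GPDX_DATA_UPDATE_MASK above); e.g.
+ * RISC_SEMAPHORE_SET writes (0x1 << 16) | 0x1 to flip only that bit.
+ */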
+#define RISC_SEMAPHORE		0x1UL
+#define RISC_SEMAPHORE_WE	(RISC_SEMAPHORE << 16)
+#define RISC_SEMAPHORE_CLR	(RISC_SEMAPHORE_WE | 0x0UL)
+#define RISC_SEMAPHORE_SET	(RISC_SEMAPHORE_WE | RISC_SEMAPHORE)
+
+#define RISC_SEMAPHORE_FORCE		0x8000UL
+#define RISC_SEMAPHORE_FORCE_WE		(RISC_SEMAPHORE_FORCE << 16)
+#define RISC_SEMAPHORE_FORCE_CLR	(RISC_SEMAPHORE_FORCE_WE | 0x0UL)
+#define RISC_SEMAPHORE_FORCE_SET	\
+		(RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE)
+
+/* RISC semaphore timeouts (ms) */
+#define TIMEOUT_SEMAPHORE		2500
+#define TIMEOUT_SEMAPHORE_FORCE		2000
+#define TIMEOUT_TOTAL_ELAPSED		4500
+
+/* Trace Control *************************************************************/
+
+#define TC_AEN_DISABLE		0
+
+#define TC_EFT_ENABLE		4
+#define TC_EFT_DISABLE		5
+
+#define TC_FCE_ENABLE		8
+#define TC_FCE_OPTIONS		0
+#define TC_FCE_DEFAULT_RX_SIZE	2112
+#define TC_FCE_DEFAULT_TX_SIZE	2112
+#define TC_FCE_DISABLE		9
+#define TC_FCE_DISABLE_TRACE	BIT_0
+
+/* MID Support ***************************************************************/
+
+#define MIN_MULTI_ID_FABRIC	64	/* Must be power-of-2. */
+#define MAX_MULTI_ID_FABRIC	256	/* ... */
+
+struct mid_conf_entry_24xx {
+	uint16_t reserved_1;
+
+	/*
+	 * BIT 0  = Enable Hard Loop Id
+	 * BIT 1  = Acquire Loop ID in LIPA
+	 * BIT 2  = ID not Acquired
+	 * BIT 3  = Enable VP
+	 * BIT 4  = Enable Initiator Mode
+	 * BIT 5  = Disable Target Mode
+	 * BIT 6-7 = Reserved
+	 */
+	uint8_t options;
+
+	uint8_t hard_address;
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+};
+
+struct mid_init_cb_24xx {
+	struct init_cb_24xx init_cb;
+
+	uint16_t count;
+	uint16_t options;
+
+	struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
+};
+
+
+struct mid_db_entry_24xx {
+	uint16_t status;
+#define MDBS_NON_PARTIC		BIT_3
+#define MDBS_ID_ACQUIRED	BIT_1
+#define MDBS_ENABLED		BIT_0
+
+	uint8_t options;
+	uint8_t hard_address;
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+
+	uint8_t port_id[3];
+	uint8_t reserved_1;
+};
+
+/*
+ * Virtual Port Control IOCB
+ */
+#define VP_CTRL_IOCB_TYPE	0x30	/* Virtual Port Control entry. */
+struct vp_ctrl_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t vp_idx_failed;
+
+	uint16_t comp_status;		/* Completion status. */
+#define CS_VCE_IOCB_ERROR       0x01    /* Error processing IOCB */
+#define CS_VCE_ACQ_ID_ERROR	0x02	/* Error while acquiring ID. */
+#define CS_VCE_BUSY		0x05	/* Firmware not ready to accept cmd. */
+
+	uint16_t command;
+#define VCE_COMMAND_ENABLE_VPS	0x00	/* Enable VPs. */
+#define VCE_COMMAND_DISABLE_VPS	0x08	/* Disable VPs. */
+#define VCE_COMMAND_DISABLE_VPS_REINIT	0x09 /* Disable VPs and reinit link. */
+#define VCE_COMMAND_DISABLE_VPS_LOGO	0x0a /* Disable VPs and LOGO ports. */
+#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL        0x0b /* Disable VPs and LOGO ports. */
+
+	uint16_t vp_count;
+
+	uint8_t vp_idx_map[16];
+	uint16_t flags;
+	uint16_t id;
+	uint16_t reserved_4;
+	uint16_t hopct;
+	uint8_t reserved_5[24];
+};
+
+/*
+ * Modify Virtual Port Configuration IOCB
+ */
+#define VP_CONFIG_IOCB_TYPE	0x31	/* Virtual Port Config entry. */
+struct vp_config_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint16_t flags;
+#define CS_VF_BIND_VPORTS_TO_VF         BIT_0
+#define CS_VF_SET_QOS_OF_VPORTS         BIT_1
+#define CS_VF_SET_HOPS_OF_VPORTS        BIT_2
+
+	uint16_t comp_status;		/* Completion status. */
+#define CS_VCT_STS_ERROR	0x01	/* Specified VPs were not disabled. */
+#define CS_VCT_CNT_ERROR	0x02	/* Invalid VP count. */
+#define CS_VCT_ERROR		0x03	/* Unknown error. */
+#define CS_VCT_IDX_ERROR	0x02	/* Invalid VP index. */
+#define CS_VCT_BUSY		0x05	/* Firmware not ready to accept cmd. */
+
+	uint8_t command;
+#define VCT_COMMAND_MOD_VPS     0x00    /* Modify VP configurations. */
+#define VCT_COMMAND_MOD_ENABLE_VPS 0x01 /* Modify configuration & enable VPs. */
+
+	uint8_t vp_count;
+
+	uint8_t vp_index1;
+	uint8_t vp_index2;
+
+	uint8_t options_idx1;
+	uint8_t hard_address_idx1;
+	uint16_t reserved_vp1;
+	uint8_t port_name_idx1[WWN_SIZE];
+	uint8_t node_name_idx1[WWN_SIZE];
+
+	uint8_t options_idx2;
+	uint8_t hard_address_idx2;
+	uint16_t reserved_vp2;
+	uint8_t port_name_idx2[WWN_SIZE];
+	uint8_t node_name_idx2[WWN_SIZE];
+	uint16_t id;
+	uint16_t reserved_4;
+	uint16_t hopct;
+	uint8_t reserved_5[2];
+};
+
+#define VP_RPT_ID_IOCB_TYPE	0x32	/* Report ID Acquisition entry. */
+enum VP_STATUS {
+	VP_STAT_COMPL,
+	VP_STAT_FAIL,
+	VP_STAT_ID_CHG,
+	VP_STAT_SNS_TO,				/* timeout */
+	VP_STAT_SNS_RJT,
+	VP_STAT_SCR_TO,				/* timeout */
+	VP_STAT_SCR_RJT,
+};
+
+enum VP_FLAGS {
+	VP_FLAGS_CON_FLOOP = 1,
+	VP_FLAGS_CON_P2P = 2,
+	VP_FLAGS_CON_FABRIC = 3,
+	VP_FLAGS_NAME_VALID = BIT_5,
+};
+
+struct vp_rpt_id_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+	uint32_t resv1;
+	uint8_t vp_acquired;
+	uint8_t vp_setup;
+	uint8_t vp_idx;		/* Format 0=reserved */
+	uint8_t vp_status;	/* Format 0=reserved */
+
+	uint8_t port_id[3];
+	uint8_t format;
+	union {
+		struct {
+			/* format 0 loop */
+			uint8_t vp_idx_map[16];
+			uint8_t reserved_4[32];
+		} f0;
+		struct {
+			/* format 1 fabric */
+			uint8_t vpstat1_subcode; /* vp_status=1 subcode */
+			uint8_t flags;
+			uint16_t fip_flags;
+			uint8_t rsv2[12];
+
+			uint8_t ls_rjt_vendor;
+			uint8_t ls_rjt_explanation;
+			uint8_t ls_rjt_reason;
+			uint8_t rsv3[5];
+
+			uint8_t port_name[8];
+			uint8_t node_name[8];
+			uint16_t bbcr;
+			uint8_t reserved_5[6];
+		} f1;
+		struct { /* format 2: N2N direct connect */
+			uint8_t vpstat1_subcode;
+			uint8_t flags;
+			uint16_t rsv6;
+			uint8_t rsv2[12];
+
+			uint8_t ls_rjt_vendor;
+			uint8_t ls_rjt_explanation;
+			uint8_t ls_rjt_reason;
+			uint8_t rsv3[5];
+
+			uint8_t port_name[8];
+			uint8_t node_name[8];
+			uint32_t remote_nport_id;
+			uint32_t reserved_5;
+		} f2;
+	} u;
+};
+
+#define VF_EVFP_IOCB_TYPE	0x26	/* Exchange Virtual Fabric Parameters entry. */
+struct vf_evfp_entry_24xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint16_t comp_status;		/* Completion status. */
+	uint16_t timeout;		/* timeout */
+	uint16_t adim_tagging_mode;
+
+	uint16_t vfport_id;
+	uint32_t exch_addr;
+
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t control_flags;
+	uint32_t io_parameter_0;
+	uint32_t io_parameter_1;
+	uint32_t tx_address[2];		/* Data segment 0 address. */
+	uint32_t tx_len;		/* Data segment 0 length. */
+	uint32_t rx_address[2];		/* Data segment 1 address. */
+	uint32_t rx_len;		/* Data segment 1 length. */
+};
+
+/* END MID Support ***********************************************************/
+
+/* Flash Description Table ***************************************************/
+
+struct qla_fdt_layout {
+	uint8_t sig[4];
+	uint16_t version;
+	uint16_t len;
+	uint16_t checksum;
+	uint8_t unused1[2];
+	uint8_t model[16];
+	uint16_t man_id;
+	uint16_t id;
+	uint8_t flags;
+	uint8_t erase_cmd;
+	uint8_t alt_erase_cmd;
+	uint8_t wrt_enable_cmd;
+	uint8_t wrt_enable_bits;
+	uint8_t wrt_sts_reg_cmd;
+	uint8_t unprotect_sec_cmd;
+	uint8_t read_man_id_cmd;
+	uint32_t block_size;
+	uint32_t alt_block_size;
+	uint32_t flash_size;
+	uint32_t wrt_enable_data;
+	uint8_t read_id_addr_len;
+	uint8_t wrt_disable_bits;
+	uint8_t read_dev_id_len;
+	uint8_t chip_erase_cmd;
+	uint16_t read_timeout;
+	uint8_t protect_sec_cmd;
+	uint8_t unused2[65];
+};
+
+/* Flash Layout Table ********************************************************/
+
+struct qla_flt_location {
+	uint8_t sig[4];
+	uint16_t start_lo;
+	uint16_t start_hi;
+	uint8_t version;
+	uint8_t unused[5];
+	uint16_t checksum;
+};
+
+struct qla_flt_header {
+	uint16_t version;
+	uint16_t length;
+	uint16_t checksum;
+	uint16_t unused;
+};
+
+#define FLT_REG_FW		0x01
+#define FLT_REG_BOOT_CODE	0x07
+#define FLT_REG_VPD_0		0x14
+#define FLT_REG_NVRAM_0		0x15
+#define FLT_REG_VPD_1		0x16
+#define FLT_REG_NVRAM_1		0x17
+#define FLT_REG_VPD_2		0xD4
+#define FLT_REG_NVRAM_2		0xD5
+#define FLT_REG_VPD_3		0xD6
+#define FLT_REG_NVRAM_3		0xD7
+#define FLT_REG_FDT		0x1a
+#define FLT_REG_FLT		0x1c
+#define FLT_REG_HW_EVENT_0	0x1d
+#define FLT_REG_HW_EVENT_1	0x1f
+#define FLT_REG_NPIV_CONF_0	0x29
+#define FLT_REG_NPIV_CONF_1	0x2a
+#define FLT_REG_GOLD_FW		0x2f
+#define FLT_REG_FCP_PRIO_0	0x87
+#define FLT_REG_FCP_PRIO_1	0x88
+#define FLT_REG_CNA_FW		0x97
+#define FLT_REG_BOOT_CODE_8044	0xA2
+#define FLT_REG_FCOE_FW		0xA4
+#define FLT_REG_FCOE_NVRAM_0	0xAA
+#define FLT_REG_FCOE_NVRAM_1	0xAC
+
+/* 27xx */
+#define FLT_REG_IMG_PRI_27XX	0x95
+#define FLT_REG_IMG_SEC_27XX	0x96
+#define FLT_REG_FW_SEC_27XX	0x02
+#define FLT_REG_BOOTLOAD_SEC_27XX	0x9
+#define FLT_REG_VPD_SEC_27XX_0	0x50
+#define FLT_REG_VPD_SEC_27XX_1	0x52
+#define FLT_REG_VPD_SEC_27XX_2	0xD8
+#define FLT_REG_VPD_SEC_27XX_3	0xDA
+
+struct qla_flt_region {
+	uint32_t code;
+	uint32_t size;
+	uint32_t start;
+	uint32_t end;
+};
+
+/* Flash NPIV Configuration Table ********************************************/
+
+struct qla_npiv_header {
+	uint8_t sig[2];
+	uint16_t version;
+	uint16_t entries;
+	uint16_t unused[4];
+	uint16_t checksum;
+};
+
+struct qla_npiv_entry {
+	uint16_t flags;
+	uint16_t vf_id;
+	uint8_t q_qos;
+	uint8_t f_qos;
+	uint16_t unused1;
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+};
+
+/* 84XX Support **************************************************************/
+
+#define MBA_ISP84XX_ALERT	0x800f  /* Alert Notification. */
+#define A84_PANIC_RECOVERY	0x1
+#define A84_OP_LOGIN_COMPLETE	0x2
+#define A84_DIAG_LOGIN_COMPLETE	0x3
+#define A84_GOLD_LOGIN_COMPLETE	0x4
+
+#define MBC_ISP84XX_RESET	0x3a    /* Reset. */
+
+#define FSTATE_REMOTE_FC_DOWN	BIT_0
+#define FSTATE_NSL_LINK_DOWN	BIT_1
+#define FSTATE_IS_DIAG_FW	BIT_2
+#define FSTATE_LOGGED_IN	BIT_3
+#define FSTATE_WAITING_FOR_VERIFY	BIT_4
+
+#define VERIFY_CHIP_IOCB_TYPE	0x1B
+struct verify_chip_entry_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t options;
+#define VCO_DONT_UPDATE_FW	BIT_0
+#define VCO_FORCE_UPDATE	BIT_1
+#define VCO_DONT_RESET_UPDATE	BIT_2
+#define VCO_DIAG_FW		BIT_3
+#define VCO_END_OF_DATA		BIT_14
+#define VCO_ENABLE_DSD		BIT_15
+
+	uint16_t reserved_1;
+
+	uint16_t data_seg_cnt;
+	uint16_t reserved_2[3];
+
+	uint32_t fw_ver;
+	uint32_t exchange_address;
+
+	uint32_t reserved_3[3];
+	uint32_t fw_size;
+	uint32_t fw_seq_size;
+	uint32_t relative_offset;
+
+	uint32_t dseg_address[2];
+	uint32_t dseg_length;
+};
+
+struct verify_chip_rsp_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t comp_status;
+#define CS_VCS_CHIP_FAILURE	0x3
+#define CS_VCS_BAD_EXCHANGE	0x8
+#define CS_VCS_SEQ_COMPLETEi	0x40
+
+	uint16_t failure_code;
+#define VFC_CHECKSUM_ERROR	0x1
+#define VFC_INVALID_LEN		0x2
+#define VFC_ALREADY_IN_PROGRESS	0x8
+
+	uint16_t reserved_1[4];
+
+	uint32_t fw_ver;
+	uint32_t exchange_address;
+
+	uint32_t reserved_2[6];
+};
+
+#define ACCESS_CHIP_IOCB_TYPE	0x2B
+struct access_chip_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t options;
+#define ACO_DUMP_MEMORY		0x0
+#define ACO_LOAD_MEMORY		0x1
+#define ACO_CHANGE_CONFIG_PARAM	0x2
+#define ACO_REQUEST_INFO	0x3
+
+	uint16_t reserved1;
+
+	uint16_t dseg_count;
+	uint16_t reserved2[3];
+
+	uint32_t parameter1;
+	uint32_t parameter2;
+	uint32_t parameter3;
+
+	uint32_t reserved3[3];
+	uint32_t total_byte_cnt;
+	uint32_t reserved4;
+
+	uint32_t dseg_address[2];
+	uint32_t dseg_length;
+};
+
+struct access_chip_rsp_84xx {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_defined;
+	uint8_t entry_status;
+
+	uint32_t handle;
+
+	uint16_t comp_status;
+	uint16_t failure_code;
+	uint32_t residual_count;
+
+	uint32_t reserved[12];
+};
+
+/* 81XX Support **************************************************************/
+
+#define MBA_DCBX_START		0x8016
+#define MBA_DCBX_COMPLETE	0x8030
+#define MBA_FCF_CONF_ERR	0x8031
+#define MBA_DCBX_PARAM_UPDATE	0x8032
+#define MBA_IDC_COMPLETE	0x8100
+#define MBA_IDC_NOTIFY		0x8101
+#define MBA_IDC_TIME_EXT	0x8102
+
+#define MBC_IDC_ACK		0x101
+#define MBC_RESTART_MPI_FW	0x3d
+#define MBC_FLASH_ACCESS_CTRL	0x3e	/* Control flash access. */
+#define MBC_GET_XGMAC_STATS	0x7a
+#define MBC_GET_DCBX_PARAMS	0x51
+
+/*
+ * ISP83xx mailbox commands
+ */
+#define MBC_WRITE_REMOTE_REG		0x0001 /* Write remote register */
+#define MBC_READ_REMOTE_REG		0x0009 /* Read remote register */
+#define MBC_RESTART_NIC_FIRMWARE	0x003d /* Restart NIC firmware */
+#define MBC_SET_ACCESS_CONTROL		0x003e /* Access control command */
+
+/* Flash access control option field bit definitions */
+#define FAC_OPT_FORCE_SEMAPHORE		BIT_15
+#define FAC_OPT_REQUESTOR_ID		BIT_14
+#define FAC_OPT_CMD_SUBCODE		0xff
+
+/* Flash access control command subcodes */
+#define FAC_OPT_CMD_WRITE_PROTECT	0x00
+#define FAC_OPT_CMD_WRITE_ENABLE	0x01
+#define FAC_OPT_CMD_ERASE_SECTOR	0x02
+#define FAC_OPT_CMD_LOCK_SEMAPHORE	0x03
+#define FAC_OPT_CMD_UNLOCK_SEMAPHORE	0x04
+#define FAC_OPT_CMD_GET_SECTOR_SIZE	0x05
+
+/* enhanced features bit definitions */
+#define NEF_LR_DIST_ENABLE	BIT_0
+
+/* LR Distance bit positions */
+#define LR_DIST_NV_POS		2
+#define LR_DIST_FW_POS		12
+#define LR_DIST_FW_SHIFT	(LR_DIST_FW_POS - LR_DIST_NV_POS)
+#define LR_DIST_FW_FIELD(x)	((x) << LR_DIST_FW_SHIFT & 0xf000)
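+/*
+ * Worked example: with distance value 4 stored at NVRAM bits 2-5 (raw
+ * word bits 0x10), LR_DIST_FW_FIELD(0x10) shifts it up by 10 into
+ * firmware bits 12-15: (0x10 << 10) & 0xf000 == 0x4000.
+ */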
+
+struct nvram_81xx {
+	/* NVRAM header. */
+	uint8_t id[4];
+	uint16_t nvram_version;
+	uint16_t reserved_0;
+
+	/* Firmware Initialization Control Block. */
+	uint16_t version;
+	uint16_t reserved_1;
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+	uint16_t reserved_2;
+
+	uint8_t port_name[WWN_SIZE];
+	uint8_t node_name[WWN_SIZE];
+
+	uint16_t login_retry_count;
+	uint16_t reserved_3;
+	uint16_t interrupt_delay_timer;
+	uint16_t login_timeout;
+
+	uint32_t firmware_options_1;
+	uint32_t firmware_options_2;
+	uint32_t firmware_options_3;
+
+	uint16_t reserved_4[4];
+
+	/* Offset 64. */
+	uint8_t enode_mac[6];
+	uint16_t reserved_5[5];
+
+	/* Offset 80. */
+	uint16_t reserved_6[24];
+
+	/* Offset 128. */
+	uint16_t ex_version;
+	uint8_t prio_fcf_matching_flags;
+	uint8_t reserved_6_1[3];
+	uint16_t pri_fcf_vlan_id;
+	uint8_t pri_fcf_fabric_name[8];
+	uint16_t reserved_6_2[7];
+	uint8_t spma_mac_addr[6];
+	uint16_t reserved_6_3[14];
+
+	/* Offset 192. */
+	uint8_t min_link_speed;
+	uint8_t reserved_7_0;
+	uint16_t reserved_7[31];
+
+	/*
+	 * BIT 0  = Enable spinup delay
+	 * BIT 1  = Disable BIOS
+	 * BIT 2  = Enable Memory Map BIOS
+	 * BIT 3  = Enable Selectable Boot
+	 * BIT 4  = Disable RISC code load
+	 * BIT 5  = Disable Serdes
+	 * BIT 6  = Opt boot mode
+	 * BIT 7  = Interrupt enable
+	 *
+	 * BIT 8  = EV Control enable
+	 * BIT 9  = Enable lip reset
+	 * BIT 10 = Enable lip full login
+	 * BIT 11 = Enable target reset
+	 * BIT 12 = Stop firmware
+	 * BIT 13 = Enable nodename option
+	 * BIT 14 = Default WWPN valid
+	 * BIT 15 = Enable alternate WWN
+	 *
+	 * BIT 16 = CLP LUN string
+	 * BIT 17 = CLP Target string
+	 * BIT 18 = CLP BIOS enable string
+	 * BIT 19 = CLP Serdes string
+	 * BIT 20 = CLP WWPN string
+	 * BIT 21 = CLP WWNN string
+	 * BIT 22 =
+	 * BIT 23 =
+	 * BIT 24 = Keep WWPN
+	 * BIT 25 = Temp WWPN
+	 * BIT 26-31 =
+	 */
+	uint32_t host_p;
+
+	uint8_t alternate_port_name[WWN_SIZE];
+	uint8_t alternate_node_name[WWN_SIZE];
+
+	uint8_t boot_port_name[WWN_SIZE];
+	uint16_t boot_lun_number;
+	uint16_t reserved_8;
+
+	uint8_t alt1_boot_port_name[WWN_SIZE];
+	uint16_t alt1_boot_lun_number;
+	uint16_t reserved_9;
+
+	uint8_t alt2_boot_port_name[WWN_SIZE];
+	uint16_t alt2_boot_lun_number;
+	uint16_t reserved_10;
+
+	uint8_t alt3_boot_port_name[WWN_SIZE];
+	uint16_t alt3_boot_lun_number;
+	uint16_t reserved_11;
+
+	/*
+	 * BIT 0 = Selective Login
+	 * BIT 1 = Alt-Boot Enable
+	 * BIT 2 = Reserved
+	 * BIT 3 = Boot Order List
+	 * BIT 4 = Reserved
+	 * BIT 5 = Selective LUN
+	 * BIT 6 = Reserved
+	 * BIT 7-31 =
+	 */
+	uint32_t efi_parameters;
+
+	uint8_t reset_delay;
+	uint8_t reserved_12;
+	uint16_t reserved_13;
+
+	uint16_t boot_id_number;
+	uint16_t reserved_14;
+
+	uint16_t max_luns_per_target;
+	uint16_t reserved_15;
+
+	uint16_t port_down_retry_count;
+	uint16_t link_down_timeout;
+
+	/* FCode parameters. */
+	uint16_t fcode_parameter;
+
+	uint16_t reserved_16[3];
+
+	/* Offset 352. */
+	uint8_t reserved_17[4];
+	uint16_t reserved_18[5];
+	uint8_t reserved_19[2];
+	uint16_t reserved_20[8];
+
+	/* Offset 384. */
+	uint8_t reserved_21[16];
+	uint16_t reserved_22[3];
+
+	/* Offset 406 (0x196) Enhanced Features
+	 * BIT 0    = Extended BB credits for LR
+	 * BIT 1    = Virtual Fabric Enable
+	 * BIT 2-5  = Distance Support if BIT 0 is on
+	 * BIT 6-15 = Unused
+	 */
+	uint16_t enhanced_features;
+	uint16_t reserved_24[4];
+
+	/* Offset 416. */
+	uint16_t reserved_25[32];
+
+	/* Offset 480. */
+	uint8_t model_name[16];
+
+	/* Offset 496. */
+	uint16_t feature_mask_l;
+	uint16_t feature_mask_h;
+	uint16_t reserved_26[2];
+
+	uint16_t subsystem_vendor_id;
+	uint16_t subsystem_device_id;
+
+	uint32_t checksum;
+};
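+
+/*
+ * Editor's sketch: the "Offset N" comments above can be verified at
+ * compile time.  These C11 asserts (offsetof() from <stddef.h>) are
+ * illustrative and not part of the original header.
+ */
+#if 0	/* example only */
+_Static_assert(offsetof(struct nvram_81xx, enode_mac) == 64, "layout");
+_Static_assert(offsetof(struct nvram_81xx, ex_version) == 128, "layout");
+_Static_assert(offsetof(struct nvram_81xx, min_link_speed) == 192, "layout");
+_Static_assert(offsetof(struct nvram_81xx, enhanced_features) == 406, "layout");
+_Static_assert(offsetof(struct nvram_81xx, model_name) == 480, "layout");
+_Static_assert(sizeof(struct nvram_81xx) == 512, "layout");
+#endif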
+
+/*
+ * ISP Initialization Control Block.
+ * Little endian except where noted.
+ */
+#define	ICB_VERSION 1
+struct init_cb_81xx {
+	uint16_t version;
+	uint16_t reserved_1;
+
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+
+	uint16_t reserved_2;
+
+	uint8_t port_name[WWN_SIZE];		/* Big endian. */
+	uint8_t node_name[WWN_SIZE];		/* Big endian. */
+
+	uint16_t response_q_inpointer;
+	uint16_t request_q_outpointer;
+
+	uint16_t login_retry_count;
+
+	uint16_t prio_request_q_outpointer;
+
+	uint16_t response_q_length;
+	uint16_t request_q_length;
+
+	uint16_t reserved_3;
+
+	uint16_t prio_request_q_length;
+
+	uint32_t request_q_address[2];
+	uint32_t response_q_address[2];
+	uint32_t prio_request_q_address[2];
+
+	uint8_t reserved_4[8];
+
+	uint16_t atio_q_inpointer;
+	uint16_t atio_q_length;
+	uint32_t atio_q_address[2];
+
+	uint16_t interrupt_delay_timer;		/* 100us increments. */
+	uint16_t login_timeout;
+
+	/*
+	 * BIT 0-3 = Reserved
+	 * BIT 4  = Enable Target Mode
+	 * BIT 5  = Disable Initiator Mode
+	 * BIT 6  = Reserved
+	 * BIT 7  = Reserved
+	 *
+	 * BIT 8-13 = Reserved
+	 * BIT 14 = Node Name Option
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_1;
+
+	/*
+	 * BIT 0  = Operation Mode bit 0
+	 * BIT 1  = Operation Mode bit 1
+	 * BIT 2  = Operation Mode bit 2
+	 * BIT 3  = Operation Mode bit 3
+	 * BIT 4-7 = Reserved
+	 *
+	 * BIT 8  = Enable Class 2
+	 * BIT 9  = Enable ACK0
+	 * BIT 10 = Reserved
+	 * BIT 11 = Enable FC-SP Security
+	 * BIT 12 = FC Tape Enable
+	 * BIT 13 = Reserved
+	 * BIT 14 = Enable Target PRLI Control
+	 * BIT 15-31 = Reserved
+	 */
+	uint32_t firmware_options_2;
+
+	/*
+	 * BIT 0-3 = Reserved
+	 * BIT 4  = FCP RSP Payload bit 0
+	 * BIT 5  = FCP RSP Payload bit 1
+	 * BIT 6  = Enable Receive Out-of-Order data frame handling
+	 * BIT 7  = Reserved
+	 *
+	 * BIT 8  = Reserved
+	 * BIT 9  = Enable Out-of-Order FCP_XFER_RDY relative offset handling
+	 * BIT 10-16 = Reserved
+	 * BIT 17 = Enable multiple FCFs
+	 * BIT 18-20 = MAC addressing mode
+	 * BIT 21-25 = Ethernet data rate
+	 * BIT 26 = Enable ethernet header rx IOCB for ATIO q
+	 * BIT 27 = Enable ethernet header rx IOCB for response q
+	 * BIT 28 = SPMA selection bit 0
+	 * BIT 29 = SPMA selection bit 1
+	 * BIT 30-31 = Reserved
+	 */
+	uint32_t firmware_options_3;
+
+	uint8_t  reserved_5[8];
+
+	uint8_t enode_mac[6];
+
+	uint8_t reserved_6[10];
+};
+
+struct mid_init_cb_81xx {
+	struct init_cb_81xx init_cb;
+
+	uint16_t count;
+	uint16_t options;
+
+	struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
+};
+
+struct ex_init_cb_81xx {
+	uint16_t ex_version;
+	uint8_t prio_fcf_matching_flags;
+	uint8_t reserved_1[3];
+	uint16_t pri_fcf_vlan_id;
+	uint8_t pri_fcf_fabric_name[8];
+	uint16_t reserved_2[7];
+	uint8_t spma_mac_addr[6];
+	uint16_t reserved_3[14];
+};
+
+#define FARX_ACCESS_FLASH_CONF_81XX	0x7FFD0000
+#define FARX_ACCESS_FLASH_DATA_81XX	0x7F800000
+
+/* FCP priority config defines *************************************/
+/* operations */
+#define QLFC_FCP_PRIO_DISABLE           0x0
+#define QLFC_FCP_PRIO_ENABLE            0x1
+#define QLFC_FCP_PRIO_GET_CONFIG        0x2
+#define QLFC_FCP_PRIO_SET_CONFIG        0x3
+
+struct qla_fcp_prio_entry {
+	uint16_t flags;         /* Describes parameter(s) in FCP        */
+				/* priority entry that are valid        */
+#define FCP_PRIO_ENTRY_VALID            0x1
+#define FCP_PRIO_ENTRY_TAG_VALID        0x2
+#define FCP_PRIO_ENTRY_SPID_VALID       0x4
+#define FCP_PRIO_ENTRY_DPID_VALID       0x8
+#define FCP_PRIO_ENTRY_LUNB_VALID       0x10
+#define FCP_PRIO_ENTRY_LUNE_VALID       0x20
+#define FCP_PRIO_ENTRY_SWWN_VALID       0x40
+#define FCP_PRIO_ENTRY_DWWN_VALID       0x80
+	uint8_t  tag;           /* Priority value                   */
+	uint8_t  reserved;      /* Reserved for future use          */
+	uint32_t src_pid;       /* Src port id. high order byte     */
+				/* unused; -1 (wild card)           */
+	uint32_t dst_pid;       /* Dst port id. high order byte     */
+				/* unused; -1 (wild card)           */
+	uint16_t lun_beg;       /* 1st lun num of lun range.        */
+				/* -1 (wild card)                   */
+	uint16_t lun_end;       /* 2nd lun num of lun range.        */
+				/* -1 (wild card)                   */
+	uint8_t  src_wwpn[8];   /* Source WWPN: -1 (wild card)      */
+	uint8_t  dst_wwpn[8];   /* Destination WWPN: -1 (wild card) */
+};
+
+struct qla_fcp_prio_cfg {
+	uint8_t  signature[4];  /* "HQOS" signature of config data  */
+	uint16_t version;       /* 1: Initial version               */
+	uint16_t length;        /* config data size in num bytes    */
+	uint16_t checksum;      /* config data bytes checksum       */
+	uint16_t num_entries;   /* Number of entries                */
+	uint16_t size_of_entry; /* Size of each entry in num bytes  */
+	uint8_t  attributes;    /* enable/disable, persistence      */
+#define FCP_PRIO_ATTR_DISABLE   0x0
+#define FCP_PRIO_ATTR_ENABLE    0x1
+#define FCP_PRIO_ATTR_PERSIST   0x2
+	uint8_t  reserved;      /* Reserved for future use          */
+#define FCP_PRIO_CFG_HDR_SIZE   0x10
+	struct qla_fcp_prio_entry entry[1];     /* fcp priority entries  */
+#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+};
+
+#define FCP_PRIO_CFG_SIZE       (32*1024) /* fcp prio data per port*/
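+
+/*
+ * Editor's sketch of consulting a retrieved FCP priority table
+ * (illustration only; the helper name and wildcard handling are
+ * assumptions, not the driver's actual matching code).
+ */
+#if 0	/* example only */
+static uint8_t example_fcp_prio_lookup(struct qla_fcp_prio_cfg *cfg,
+    uint32_t src_pid, uint32_t dst_pid)
+{
+	struct qla_fcp_prio_entry *e = &cfg->entry[0];
+	uint16_t i;
+
+	for (i = 0; i < cfg->num_entries; i++, e++) {
+		if (!(e->flags & FCP_PRIO_ENTRY_VALID))
+			continue;
+		if ((e->flags & FCP_PRIO_ENTRY_SPID_VALID) &&
+		    e->src_pid != 0xffffffff && e->src_pid != src_pid)
+			continue;
+		if ((e->flags & FCP_PRIO_ENTRY_DPID_VALID) &&
+		    e->dst_pid != 0xffffffff && e->dst_pid != dst_pid)
+			continue;
+		if (e->flags & FCP_PRIO_ENTRY_TAG_VALID)
+			return e->tag;		/* matched priority value */
+	}
+	return 0;				/* no match: default priority */
+}
+#endif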
+
+/* 25XX Support ****************************************************/
+#define FA_FCP_PRIO0_ADDR_25	0x3C000
+#define FA_FCP_PRIO1_ADDR_25	0x3E000
+
+/* 81XX Flash locations -- occupies second 2MB region. */
+#define FA_BOOT_CODE_ADDR_81	0x80000
+#define FA_RISC_CODE_ADDR_81	0xA0000
+#define FA_FW_AREA_ADDR_81	0xC0000
+#define FA_VPD_NVRAM_ADDR_81	0xD0000
+#define FA_VPD0_ADDR_81		0xD0000
+#define FA_VPD1_ADDR_81		0xD0400
+#define FA_NVRAM0_ADDR_81	0xD0080
+#define FA_NVRAM1_ADDR_81	0xD0180
+#define FA_FEATURE_ADDR_81	0xD4000
+#define FA_FLASH_DESCR_ADDR_81	0xD8000
+#define FA_FLASH_LAYOUT_ADDR_81	0xD8400
+#define FA_HW_EVENT0_ADDR_81	0xDC000
+#define FA_HW_EVENT1_ADDR_81	0xDC400
+#define FA_NPIV_CONF0_ADDR_81	0xD1000
+#define FA_NPIV_CONF1_ADDR_81	0xD2000
+
+/* 83XX Flash locations -- occupies second 8MB region. */
+#define FA_FLASH_LAYOUT_ADDR_83	0xFC400
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_gbl.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_gbl.h
new file mode 100644
index 0000000..8970634
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_gbl.h
@@ -0,0 +1,875 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_GBL_H
+#define	__QLA_GBL_H
+
+#include <linux/interrupt.h>
+
+/*
+ * Global Function Prototypes in qla_init.c source file.
+ */
+extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
+
+extern int qla2100_pci_config(struct scsi_qla_host *);
+extern int qla2300_pci_config(struct scsi_qla_host *);
+extern int qla24xx_pci_config(scsi_qla_host_t *);
+extern int qla25xx_pci_config(scsi_qla_host_t *);
+extern void qla2x00_reset_chip(struct scsi_qla_host *);
+extern void qla24xx_reset_chip(struct scsi_qla_host *);
+extern int qla2x00_chip_diag(struct scsi_qla_host *);
+extern int qla24xx_chip_diag(struct scsi_qla_host *);
+extern void qla2x00_config_rings(struct scsi_qla_host *);
+extern void qla24xx_config_rings(struct scsi_qla_host *);
+extern void qla2x00_reset_adapter(struct scsi_qla_host *);
+extern void qla24xx_reset_adapter(struct scsi_qla_host *);
+extern int qla2x00_nvram_config(struct scsi_qla_host *);
+extern int qla24xx_nvram_config(struct scsi_qla_host *);
+extern int qla81xx_nvram_config(struct scsi_qla_host *);
+extern void qla2x00_update_fw_options(struct scsi_qla_host *);
+extern void qla24xx_update_fw_options(scsi_qla_host_t *);
+extern void qla81xx_update_fw_options(scsi_qla_host_t *);
+extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
+extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
+
+extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
+extern int qla2x00_loop_resync(scsi_qla_host_t *);
+
+extern int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
+
+extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
+extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
+
+extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+
+extern void qla2x00_update_fcports(scsi_qla_host_t *);
+
+extern int qla2x00_abort_isp(scsi_qla_host_t *);
+extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
+extern void qla2x00_quiesce_io(scsi_qla_host_t *);
+
+extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
+
+extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
+extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
+
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
+
+extern void qla84xx_put_chip(struct scsi_qla_host *);
+
+extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
+    enum qla_work_type);
+extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
+int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
+extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
+extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+
+extern fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
+
+extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t);
+extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *);
+extern void qla83xx_idc_audit(scsi_qla_host_t *, int);
+extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
+extern void qla83xx_reset_ownership(scsi_qla_host_t *);
+extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+extern int qla2x00_init_rings(scsi_qla_host_t *);
+extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+	int, int, bool);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
+void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
+	struct imm_ntfy_from_isp *, int);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
+    void *);
+int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+/*
+ * Global Data in qla_os.c source file.
+ */
+extern char qla2x00_version_str[];
+
+extern struct kmem_cache *srb_cachep;
+extern struct kmem_cache *qla_tgt_plogi_cachep;
+
+extern int ql2xlogintimeout;
+extern int qlport_down_retry;
+extern int ql2xplogiabsentdevice;
+extern int ql2xloginretrycount;
+extern int ql2xfdmienable;
+extern int ql2xallocfwdump;
+extern int ql2xextended_error_logging;
+extern int ql2xiidmaenable;
+extern int ql2xmqsupport;
+extern int ql2xfwloadbin;
+extern int ql2xetsenable;
+extern int ql2xshiftctondsd;
+extern int ql2xdbwr;
+extern int ql2xasynctmfenable;
+extern int ql2xgffidenable;
+extern int ql2xenabledif;
+extern int ql2xenablehba_err_chk;
+extern int ql2xtargetreset;
+extern int ql2xdontresethba;
+extern uint64_t ql2xmaxlun;
+extern int ql2xmdcapmask;
+extern int ql2xmdenable;
+extern int ql2xexlogins;
+extern int ql2xexchoffld;
+extern int ql2xiniexchg;
+extern int ql2xfwholdabts;
+extern int ql2xmvasynctoatio;
+extern int ql2xuctrlirq;
+extern int ql2xnvmeenable;
+extern int ql2xautodetectsfp;
+
+extern int qla2x00_loop_reset(scsi_qla_host_t *);
+extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
+extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
+    fc_host_event_code, u32);
+extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
+extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
+    fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
+    fc_port_t *, uint16_t *);
+extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
+extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
+extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
+extern void qla2x00_free_exchoffld_buffer(struct qla_hw_data *);
+
+extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
+
+extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
+	struct qla_hw_data *);
+extern void qla2x00_free_host(struct scsi_qla_host *);
+extern void qla2x00_relogin(struct scsi_qla_host *);
+extern void qla2x00_do_work(struct scsi_qla_host *);
+extern void qla2x00_free_fcports(struct scsi_qla_host *);
+
+extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
+extern void qla83xx_service_idc_aen(struct work_struct *);
+extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *);
+extern void qla83xx_idc_state_handler_work(struct work_struct *);
+extern void qla83xx_nic_core_reset_work(struct work_struct *);
+
+extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t);
+extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t);
+extern int qla83xx_idc_state_handler(scsi_qla_host_t *);
+extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
+extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
+
+extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *);
+extern void qla2xxx_qpair_sp_compl(void *, int);
+extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
+	uint16_t *);
+int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
+
+/*
+ * Global Functions in qla_mid.c source file.
+ */
+extern struct scsi_host_template qla2xxx_driver_template;
+extern struct scsi_transport_template *qla2xxx_transport_vport_template;
+extern void qla2x00_timer(scsi_qla_host_t *);
+extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
+extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
+extern int qla24xx_disable_vp(scsi_qla_host_t *);
+extern int qla24xx_enable_vp(scsi_qla_host_t *);
+extern int qla24xx_control_vp(scsi_qla_host_t *, int);
+extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
+extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
+extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
+extern int qla24xx_configure_vhba(scsi_qla_host_t *);
+extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
+    struct vp_rpt_id_entry_24xx *);
+extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
+extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
+extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
+
+extern void qla2x00_sp_free_dma(void *);
+extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
+
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
+
+extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
+
+extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
+extern int qla2x00_wait_for_chip_reset(scsi_qla_host_t *);
+extern int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *);
+
+extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
+extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
+extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
+	uint16_t *);
+extern int  qla2x00_vp_abort_isp(scsi_qla_host_t *);
+
+/*
+ * Global Function Prototypes in qla_iocb.c source file.
+ */
+
+extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
+extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
+extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+	uint16_t, struct req_que *);
+extern int qla2x00_start_scsi(srb_t *sp);
+extern int qla24xx_start_scsi(srb_t *sp);
+int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
+						uint16_t, uint64_t, uint8_t);
+extern int qla2x00_start_sp(srb_t *);
+extern int qla24xx_dif_start_scsi(srb_t *);
+extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
+extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
+
+extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
+extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
+	uint32_t *, uint16_t, struct qla_tc_param *);
+extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
+	uint32_t *, uint16_t, struct qla_tc_param *);
+extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
+	uint32_t *, uint16_t, struct qla_tc_param *);
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+	struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
+
+/*
+ * Global Function Prototypes in qla_mbx.c source file.
+ */
+extern int
+qla2x00_load_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
+extern int
+qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
+extern int
+qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
+
+extern int
+qla2x00_get_fw_version(scsi_qla_host_t *);
+
+extern int
+qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_set_fw_options(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_mbx_reg_test(scsi_qla_host_t *);
+
+extern int
+qla2x00_verify_checksum(scsi_qla_host_t *, uint32_t);
+
+extern int
+qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
+
+extern int
+qla2x00_abort_command(srb_t *);
+
+extern int
+qla2x00_abort_target(struct fc_port *, uint64_t, int);
+
+extern int
+qla2x00_lun_reset(struct fc_port *, uint64_t, int);
+
+extern int
+qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
+    uint8_t *, uint16_t *, uint16_t *);
+
+extern int
+qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
+
+extern int
+qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
+
+extern int
+qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
+
+extern int
+qla2x00_get_firmware_state(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
+
+extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
+extern int
+qla2x00_lip_reset(scsi_qla_host_t *);
+
+extern int
+qla2x00_send_sns(scsi_qla_host_t *, dma_addr_t, uint16_t, size_t);
+
+extern int
+qla2x00_login_fabric(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t,
+    uint16_t *, uint8_t);
+extern int
+qla24xx_login_fabric(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t,
+    uint16_t *, uint8_t);
+
+extern int
+qla2x00_login_local_device(scsi_qla_host_t *, fc_port_t *, uint16_t *,
+    uint8_t);
+
+extern int
+qla2x00_fabric_logout(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t);
+
+extern int
+qla24xx_fabric_logout(scsi_qla_host_t *, uint16_t, uint8_t, uint8_t, uint8_t);
+
+extern int
+qla2x00_full_login_lip(scsi_qla_host_t *ha);
+
+extern int
+qla2x00_get_id_list(scsi_qla_host_t *, void *, dma_addr_t, uint16_t *);
+
+extern int
+qla2x00_get_resource_cnts(scsi_qla_host_t *);
+
+extern int
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+
+extern int
+qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
+    dma_addr_t);
+
+extern int
+qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
+    dma_addr_t, uint16_t);
+
+extern int qla24xx_abort_command(srb_t *);
+extern int qla24xx_async_abort_command(srb_t *);
+extern int
+qla24xx_abort_target(struct fc_port *, uint64_t, int);
+extern int
+qla24xx_lun_reset(struct fc_port *, uint64_t, int);
+extern int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int,
+	uint64_t, enum nexus_wait_type);
+extern int
+qla2x00_system_error(scsi_qla_host_t *);
+
+extern int
+qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t);
+extern int
+qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
+
+extern int
+qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int
+qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
+qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
+
+extern int
+qla2x00_stop_firmware(scsi_qla_host_t *);
+
+extern int
+qla2x00_enable_eft_trace(scsi_qla_host_t *, dma_addr_t, uint16_t);
+extern int
+qla2x00_disable_eft_trace(scsi_qla_host_t *);
+
+extern int
+qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *,
+    uint32_t *);
+
+extern int
+qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+
+extern int
+qla82xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla25xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
+	uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern int
+qla2x00_write_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
+	uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern int
+qla2x00_set_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *);
+
+extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
+
+extern int qla81xx_idc_ack(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla81xx_fac_get_sector_size(scsi_qla_host_t *, uint32_t *);
+
+extern int
+qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
+
+extern int
+qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
+
+extern int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
+
+extern int
+qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
+
+extern int
+qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
+qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+
+extern int
+qla81xx_write_mpi_register(scsi_qla_host_t *, uint16_t *);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t,
+	uint16_t *);
+extern int
+qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
+
+extern int
+qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
+
+extern int
+qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+
+extern int
+qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+    uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+	struct port_database_24xx *);
+
+extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
+extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+
+/*
+ * Global Function Prototypes in qla_isr.c source file.
+ */
+extern irqreturn_t qla2100_intr_handler(int, void *);
+extern irqreturn_t qla2300_intr_handler(int, void *);
+extern irqreturn_t qla24xx_intr_handler(int, void *);
+extern void qla2x00_process_response_queue(struct rsp_que *);
+extern void
+qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
+extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
+extern void qla2x00_free_irqs(scsi_qla_host_t *);
+
+extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
+extern srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
+	void *);
+extern void
+qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
+	uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
+fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
+fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
+fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
+
+/*
+ * Global Function Prototypes in qla_sup.c source file.
+ */
+extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
+extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
+					 uint32_t, uint32_t);
+extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+					uint32_t);
+extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+					uint32_t);
+extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+				    uint32_t);
+extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+				    uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+					uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+				    uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
+
+extern int qla2x00_beacon_on(struct scsi_qla_host *);
+extern int qla2x00_beacon_off(struct scsi_qla_host *);
+extern void qla2x00_beacon_blink(struct scsi_qla_host *);
+extern int qla24xx_beacon_on(struct scsi_qla_host *);
+extern int qla24xx_beacon_off(struct scsi_qla_host *);
+extern void qla24xx_beacon_blink(struct scsi_qla_host *);
+extern void qla83xx_beacon_blink(struct scsi_qla_host *);
+extern int qla82xx_beacon_on(struct scsi_qla_host *);
+extern int qla82xx_beacon_off(struct scsi_qla_host *);
+extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
+extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
+extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
+				  uint32_t, uint16_t *);
+
+extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+					 uint32_t, uint32_t);
+extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+				     uint32_t, uint32_t);
+extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+					 uint32_t, uint32_t);
+extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+				     uint32_t, uint32_t);
+extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+					 uint32_t, uint32_t);
+extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
+					 uint8_t *, uint32_t, uint32_t);
+extern void qla8044_watchdog(struct scsi_qla_host *vha);
+
+extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *);
+
+extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
+
+extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
+
+/*
+ * Global Function Prototypes in qla_dbg.c source file.
+ */
+extern void qla2100_fw_dump(scsi_qla_host_t *, int);
+extern void qla2300_fw_dump(scsi_qla_host_t *, int);
+extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
+extern int qla27xx_fwdt_template_valid(void *);
+extern ulong qla27xx_fwdt_template_size(void *);
+extern const void *qla27xx_fwdt_template_default(void);
+extern ulong qla27xx_fwdt_template_default_size(void);
+
+extern void qla2x00_dump_regs(scsi_qla_host_t *);
+extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
+extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
+extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
+extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
+			   uint8_t *, uint32_t);
+extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
+
+/*
+ * Global Function Prototypes in qla_gs.c source file.
+ */
+extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *);
+extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *);
+extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *);
+extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *);
+extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_rft_id(scsi_qla_host_t *);
+extern int qla2x00_rff_id(scsi_qla_host_t *, u8);
+extern int qla2x00_rnn_id(scsi_qla_host_t *);
+extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
+extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla2x00_fdmi_register(scsi_qla_host_t *);
+extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
+extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
+extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
+	struct ct_sns_rsp *, const char *);
+extern void qla2x00_async_iocb_timeout(void *data);
+extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
+int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
+void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
+
+extern void qla2x00_free_fcport(fc_port_t *);
+
+extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
+extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
+void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
+void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
+
+int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
+void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+/*
+ * Global Function Prototypes in qla_attr.c source file.
+ */
+struct device_attribute;
+extern struct device_attribute *qla2x00_host_attrs[];
+struct fc_function_template;
+extern struct fc_function_template qla2xxx_transport_functions;
+extern struct fc_function_template qla2xxx_transport_vport_functions;
+extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
+extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
+extern void qla2x00_init_host_attr(scsi_qla_host_t *);
+extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *,
+	struct msg_echo_lb *, uint16_t *);
+extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
+extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
+	struct qla_fcp_prio_cfg *, uint8_t);
+
+/*
+ * Global Function Prototypes in qla_dfs.c source file.
+ */
+extern int qla2x00_dfs_setup(scsi_qla_host_t *);
+extern int qla2x00_dfs_remove(scsi_qla_host_t *);
+
+/* Global function prototypes for multi-q */
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+	struct qla_msix_entry *, int);
+extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
+extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
+	uint16_t, int, uint8_t, bool);
+extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
+	uint16_t, struct qla_qpair *, bool);
+
+extern void qla2x00_init_response_q_entries(struct rsp_que *);
+extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
+extern int qla25xx_delete_queues(struct scsi_qla_host *);
+extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+
+/* qlafx00 related functions */
+extern int qlafx00_pci_config(struct scsi_qla_host *);
+extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
+extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_chip_diag(scsi_qla_host_t *);
+extern void qlafx00_config_rings(struct scsi_qla_host *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
+extern irqreturn_t qlafx00_intr_handler(int, void *);
+extern void qlafx00_enable_intrs(struct qla_hw_data *);
+extern void qlafx00_disable_intrs(struct qla_hw_data *);
+extern int qlafx00_abort_target(fc_port_t *, uint64_t, int);
+extern int qlafx00_lun_reset(fc_port_t *, uint64_t, int);
+extern int qlafx00_start_scsi(srb_t *);
+extern int qlafx00_abort_isp(scsi_qla_host_t *);
+extern int qlafx00_iospace_config(struct qla_hw_data *);
+extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int);
+extern int qlafx00_fw_ready(scsi_qla_host_t *);
+extern int qlafx00_configure_devices(scsi_qla_host_t *);
+extern int qlafx00_reset_initialize(scsi_qla_host_t *);
+extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
+extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern int qlafx00_post_aenfx_work(struct scsi_qla_host *,  uint32_t,
+				   uint32_t *, int);
+extern uint32_t qlafx00_fw_state_show(struct device *,
+				      struct device_attribute *, char *);
+extern void qlafx00_get_host_speed(struct Scsi_Host *);
+extern void qlafx00_init_response_q_entries(struct rsp_que *);
+
+extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
+extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
+extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
+extern void qlafx00_timer_routine(scsi_qla_host_t *);
+extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
+
+/* qla82xx related functions */
+
+/* PCI related functions */
+extern int qla82xx_pci_config(struct scsi_qla_host *);
+extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_pci_region_offset(struct pci_dev *, int);
+extern int qla82xx_iospace_config(struct qla_hw_data *);
+
+/* Initialization related functions */
+extern void qla82xx_reset_chip(struct scsi_qla_host *);
+extern void qla82xx_config_rings(struct scsi_qla_host *);
+extern void qla82xx_watchdog(scsi_qla_host_t *);
+extern int qla82xx_start_firmware(scsi_qla_host_t *);
+
+/* Firmware and flash related functions */
+extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
+extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
+					 uint32_t, uint32_t);
+extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+				     uint32_t, uint32_t);
+
+/* Mailbox related functions */
+extern int qla82xx_abort_isp(scsi_qla_host_t *);
+extern int qla82xx_restart_isp(scsi_qla_host_t *);
+
+/* IOCB related functions */
+extern int qla82xx_start_scsi(srb_t *);
+extern void qla2x00_sp_free(void *);
+extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_bsg_job_done(void *, int);
+extern void qla2x00_bsg_sp_free(void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
+
+/* Interrupt related */
+extern irqreturn_t qla82xx_intr_handler(int, void *);
+extern irqreturn_t qla82xx_msi_handler(int, void *);
+extern irqreturn_t qla82xx_msix_default(int, void *);
+extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
+extern void qla82xx_enable_intrs(struct qla_hw_data *);
+extern void qla82xx_disable_intrs(struct qla_hw_data *);
+extern void qla82xx_poll(int, void *);
+extern void qla82xx_init_flags(struct qla_hw_data *);
+
+/* ISP 8021 hardware related */
+extern void qla82xx_set_drv_active(scsi_qla_host_t *);
+extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
+extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
+extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
+extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
+
+/* ISP 8021 IDC */
+extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern uint32_t  qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
+extern int qla82xx_idc_lock(struct qla_hw_data *);
+extern void qla82xx_idc_unlock(struct qla_hw_data *);
+extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
+extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
+
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
+				   size_t, char *);
+extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
+extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
+extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern int qla82xx_check_md_needed(scsi_qla_host_t *);
+extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
+extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
+extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
+extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
+extern int qla82xx_read_temperature(scsi_qla_host_t *);
+extern int qla8044_read_temperature(scsi_qla_host_t *);
+extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
+
+/* BSG related functions */
+extern int qla24xx_bsg_request(struct bsg_job *);
+extern int qla24xx_bsg_timeout(struct bsg_job *);
+extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
+	dma_addr_t, size_t, uint32_t);
+extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
+	uint16_t *, uint16_t *);
+
+/* 83xx related functions */
+extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+
+/* Minidump related functions */
+extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
+extern int qla82xx_md_get_template(scsi_qla_host_t *);
+extern int qla82xx_md_alloc(scsi_qla_host_t *);
+extern void qla82xx_md_free(scsi_qla_host_t *);
+extern int qla82xx_md_collect(scsi_qla_host_t *);
+extern void qla82xx_md_prep(scsi_qla_host_t *);
+extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
+extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha);
+
+/* Function declarations for ISP8044 */
+extern int qla8044_idc_lock(struct qla_hw_data *ha);
+extern void qla8044_idc_unlock(struct qla_hw_data *ha);
+extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr);
+extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val);
+extern void qla8044_read_reset_template(struct scsi_qla_host *ha);
+extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
+extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
+extern void qla8044_wr_direct(struct scsi_qla_host *vha,
+			      const uint32_t crb_reg, const uint32_t value);
+extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
+extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
+extern void qla8044_clear_drv_active(struct qla_hw_data *);
+void qla8044_get_minidump(struct scsi_qla_host *vha);
+int qla8044_collect_md_data(struct scsi_qla_host *vha);
+extern int qla8044_md_get_template(scsi_qla_host_t *);
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+				     uint32_t, uint32_t);
+extern irqreturn_t qla8044_intr_handler(int, void *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern int qla8044_abort_isp(scsi_qla_host_t *);
+extern int qla8044_check_fw_alive(struct scsi_qla_host *);
+extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
+	uint16_t *);
+extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
+extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
+extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
+	response_t *);
+
+void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
+void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
+	struct fc_port *, enum qlt_plogi_link_t);
+void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
+extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
+	uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
+void qla24xx_delete_sess_fn(struct work_struct *);
+void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
+void qlt_clr_qp_table(struct scsi_qla_host *vha);
+
+void qla_nvme_cmpl_io(struct srb_iocb *);
+
+#endif /* __QLA_GBL_H */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_gs.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_gs.c
new file mode 100644
index 0000000..1088038
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_gs.c
@@ -0,0 +1,3501 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+#include <linux/utsname.h>
+
+static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
+static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
+static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
+static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
+static int qla2x00_sns_rft_id(scsi_qla_host_t *);
+static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+
+/**
+ * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
+ * @vha: HA context
+ * @arg: CT argument block carrying the IOCB buffer, request/response
+ *	DMA addresses and sizes, and the N_Port handle
+ *
+ * Returns a pointer to the prepared MS IOCB (@arg->iocb).
+ */
+void *
+qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
+{
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt;
+
+	ms_pkt = (ms_iocb_entry_t *)arg->iocb;
+	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
+
+	ms_pkt->entry_type = MS_IOCB_TYPE;
+	ms_pkt->entry_count = 1;
+	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
+	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
+	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ms_pkt->total_dsd_count = cpu_to_le16(2);
+	ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
+	ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
+
+	ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
+	ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
+	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+
+	ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
+	ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
+	ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+
+	vha->qla_stats.control_requests++;
+
+	return (ms_pkt);
+}
+
+/**
+ * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
+ * @vha: HA context
+ * @arg: CT argument block carrying the IOCB buffer, request/response
+ *	DMA addresses and sizes, and the N_Port handle
+ *
+ * Returns a pointer to the prepared CT IOCB (@arg->iocb).
+ */
+void *
+qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct ct_entry_24xx *ct_pkt;
+
+	ct_pkt = (struct ct_entry_24xx *)arg->iocb;
+	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
+
+	ct_pkt->entry_type = CT_IOCB_TYPE;
+	ct_pkt->entry_count = 1;
+	ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
+	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
+	ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
+
+	ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
+	ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
+	ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+
+	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
+	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
+	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+	ct_pkt->vp_index = vha->vp_idx;
+
+	vha->qla_stats.control_requests++;
+
+	return (ct_pkt);
+}
+
+/**
+ * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
+ * @p: CT request packet buffer
+ * @cmd: GS command
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the initialized request, &@p->p.req.
+ */
+static inline struct ct_sns_req *
+qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
+{
+	memset(p, 0, sizeof(struct ct_sns_pkt));
+
+	p->p.req.header.revision = 0x01;
+	p->p.req.header.gs_type = 0xFC;
+	p->p.req.header.gs_subtype = 0x02;
+	p->p.req.command = cpu_to_be16(cmd);
+	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
+
+	return &p->p.req;
+}
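+
+/*
+ * Editor's note: max_rsp_size above is encoded in 4-byte words of
+ * residual payload after the 16-byte CT basic header; e.g. a
+ * 636-byte response buffer encodes as (636 - 16) / 4 = 155 words.
+ */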
+
+int
+qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
+    struct ct_sns_rsp *ct_rsp, const char *routine)
+{
+	int rval;
+	uint16_t comp_status;
+	struct qla_hw_data *ha = vha->hw;
+	bool lid_is_sns = false;
+
+	rval = QLA_FUNCTION_FAILED;
+	if (ms_pkt->entry_status != 0) {
+		ql_dbg(ql_dbg_disc, vha, 0x2031,
+		    "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
+		    routine, ms_pkt->entry_status, vha->d_id.b.domain,
+		    vha->d_id.b.area, vha->d_id.b.al_pa);
+	} else {
+		if (IS_FWI2_CAPABLE(ha))
+			comp_status = le16_to_cpu(
+			    ((struct ct_entry_24xx *)ms_pkt)->comp_status);
+		else
+			comp_status = le16_to_cpu(ms_pkt->status);
+		switch (comp_status) {
+		case CS_COMPLETE:
+		case CS_DATA_UNDERRUN:
+		case CS_DATA_OVERRUN:		/* Overrun? */
+			if (ct_rsp->header.response !=
+			    cpu_to_be16(CT_ACCEPT_RESPONSE)) {
+				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
+				    "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
+				    routine, vha->d_id.b.domain,
+				    vha->d_id.b.area, vha->d_id.b.al_pa,
+				    comp_status, ct_rsp->header.response);
+				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
+				    0x2078, (uint8_t *)&ct_rsp->header,
+				    sizeof(struct ct_rsp_hdr));
+				rval = QLA_INVALID_COMMAND;
+			} else
+				rval = QLA_SUCCESS;
+			break;
+		case CS_PORT_LOGGED_OUT:
+			if (IS_FWI2_CAPABLE(ha)) {
+				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
+				    NPH_SNS)
+					lid_is_sns = true;
+			} else {
+				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
+				    SIMPLE_NAME_SERVER)
+					lid_is_sns = true;
+			}
+			if (lid_is_sns) {
+				ql_dbg(ql_dbg_async, vha, 0x502b,
+					"%s failed, Name server has logged out",
+					routine);
+				rval = QLA_NOT_LOGGED_IN;
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+			}
+			break;
+		case CS_TIMEOUT:
+			rval = QLA_FUNCTION_TIMEOUT;
+			/* fall through */
+		default:
+			ql_dbg(ql_dbg_disc, vha, 0x2033,
+			    "%s failed, completion status (%x) on port_id: "
+			    "%02x%02x%02x.\n", routine, comp_status,
+			    vha->d_id.b.domain, vha->d_id.b.area,
+			    vha->d_id.b.al_pa);
+			break;
+		}
+	}
+	return rval;
+}
+
+/**
+ * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
+ * @vha: HA context
+ * @fcport: fcport entry to be updated
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int		rval;
+
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct qla_hw_data *ha = vha->hw;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_ga_nxt(vha, fcport);
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = GA_NXT_REQ_SIZE;
+	arg.rsp_size = GA_NXT_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	/* Issue GA_NXT */
+	/* Prepare common MS IOCB */
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
+	    GA_NXT_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare CT arguments -- port_id */
+	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2062,
+		    "GA_NXT issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		/* Populate fc_port_t entry. */
+		fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
+		fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
+		fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
+
+		memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
+		    WWN_SIZE);
+		memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
+		    WWN_SIZE);
+
+		fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
+		    FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+
+		if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
+		    ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
+			fcport->d_id.b.domain = 0xf0;
+
+		ql_dbg(ql_dbg_disc, vha, 0x2063,
+		    "GA_NXT entry - nn %8phN pn %8phN "
+		    "port_id=%02x%02x%02x.\n",
+		    fcport->node_name, fcport->port_name,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa);
+	}
+
+	return (rval);
+}
+
+static inline int
+qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
+{
+	return vha->hw->max_fibre_devices * 4 + 16;
+}
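+
+/*
+ * Editor's note: each GID_PT response entry is 4 bytes (one control
+ * byte plus a 3-byte port ID), so the buffer is sized at 4 bytes per
+ * supported fabric device plus the 16-byte CT header; e.g. 1024
+ * devices -> 1024 * 4 + 16 = 4112 bytes.
+ */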
+
+/**
+ * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * NOTE: Non-Nx_Ports are not requested.
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval;
+	uint16_t	i;
+
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+
+	struct ct_sns_gid_pt_data *gid_data;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t gid_pt_rsp_size;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gid_pt(vha, list);
+
+	gid_data = NULL;
+	gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = GID_PT_REQ_SIZE;
+	arg.rsp_size = gid_pt_rsp_size;
+	arg.nport_handle = NPH_SNS;
+
+	/* Issue GID_PT */
+	/* Prepare common MS IOCB */
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare CT arguments -- port_type */
+	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2055,
+		    "GID_PT issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		/* Set port IDs in switch info list. */
+		for (i = 0; i < ha->max_fibre_devices; i++) {
+			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
+			list[i].d_id.b.domain = gid_data->port_id[0];
+			list[i].d_id.b.area = gid_data->port_id[1];
+			list[i].d_id.b.al_pa = gid_data->port_id[2];
+			memset(list[i].fabric_port_name, 0, WWN_SIZE);
+			list[i].fp_speed = PORT_SPEED_UNKNOWN;
+
+			/* Last one exit. */
+			if (gid_data->control_byte & BIT_7) {
+				list[i].d_id.b.rsvd_1 = gid_data->control_byte;
+				break;
+			}
+		}
+
+		/*
+		 * If we've used all available slots, then the switch is
+		 * reporting back more devices than we can handle with this
+		 * single call.  Return a failed status, and let GA_NXT handle
+		 * the overload.
+		 */
+		if (i == ha->max_fibre_devices)
+			rval = QLA_FUNCTION_FAILED;
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval = QLA_SUCCESS;
+	uint16_t	i;
+
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct qla_hw_data *ha = vha->hw;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gpn_id(vha, list);
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = GPN_ID_REQ_SIZE;
+	arg.rsp_size = GPN_ID_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/* Issue GPN_ID */
+		/* Prepare common MS IOCB */
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+		/* Prepare CT request */
+		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
+		    GPN_ID_RSP_SIZE);
+		ct_rsp = &ha->ct_sns->p.rsp;
+
+		/* Prepare CT arguments -- port_id */
+		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+		/* Execute MS IOCB */
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+		    sizeof(ms_iocb_entry_t));
+		if (rval != QLA_SUCCESS) {
+			/*EMPTY*/
+			ql_dbg(ql_dbg_disc, vha, 0x2056,
+			    "GPN_ID issue IOCB failed (%d).\n", rval);
+			break;
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+		    "GPN_ID") != QLA_SUCCESS) {
+			rval = QLA_FUNCTION_FAILED;
+			break;
+		} else {
+			/* Save portname */
+			memcpy(list[i].port_name,
+			    ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval = QLA_SUCCESS;
+	uint16_t	i;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_gnn_id(vha, list);
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = GNN_ID_REQ_SIZE;
+	arg.rsp_size = GNN_ID_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/* Issue GNN_ID */
+		/* Prepare common MS IOCB */
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+		/* Prepare CT request */
+		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
+		    GNN_ID_RSP_SIZE);
+		ct_rsp = &ha->ct_sns->p.rsp;
+
+		/* Prepare CT arguments -- port_id */
+		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+		/* Execute MS IOCB */
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+		    sizeof(ms_iocb_entry_t));
+		if (rval != QLA_SUCCESS) {
+			/*EMPTY*/
+			ql_dbg(ql_dbg_disc, vha, 0x2057,
+			    "GNN_ID issue IOCB failed (%d).\n", rval);
+			break;
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+		    "GNN_ID") != QLA_SUCCESS) {
+			rval = QLA_FUNCTION_FAILED;
+			break;
+		} else {
+			/* Save nodename */
+			memcpy(list[i].node_name,
+			    ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
+
+			ql_dbg(ql_dbg_disc, vha, 0x2058,
+			    "GID_PT entry - nn %8phN pn %8phN "
+			    "portid=%02x%02x%02x.\n",
+			    list[i].node_name, list[i].port_name,
+			    list[i].d_id.b.domain, list[i].d_id.b.area,
+			    list[i].d_id.b.al_pa);
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rft_id(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_rft_id(vha);
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = RFT_ID_REQ_SIZE;
+	arg.rsp_size = RFT_ID_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	/* Issue RFT_ID */
+	/* Prepare common MS IOCB */
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
+	    RFT_ID_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare CT arguments -- port_id, FC-4 types */
+	ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
+
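+	/* FC-4 TYPE code x sets bit (x % 32) of big-endian word (x / 32)
+	 * in the bitmap: TYPE 0x08 (SCSI-FCP) lands in byte 2, bit 0, and
+	 * TYPE 0x28 (NVMe) in byte 6, bit 0. */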
+	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
+
+	if (vha->flags.nvme_enabled)
+		ct_req->req.rft_id.fc4_types[6] = 1;    /* NVMe type 28h */
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2043,
+		    "RFT_ID issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2044,
+		    "RFT_ID exiting normally.\n");
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
+ * @vha: HA context
+ * @type: FC-4 type code to register
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		ql_dbg(ql_dbg_disc, vha, 0x2046,
+		    "RFF_ID call not supported on ISP2100/ISP2200.\n");
+		return (QLA_SUCCESS);
+	}
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = RFF_ID_REQ_SIZE;
+	arg.rsp_size = RFF_ID_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	/* Issue RFF_ID */
+	/* Prepare common MS IOCB */
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
+	    RFF_ID_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+	ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+
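+	/* Target mode hooks in here to set its FC-4 feature bits. */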
+	qlt_rff_id(vha, ct_req);
+
+	ct_req->req.rff_id.fc4_type = type;		/* SCSI - FCP */
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2047,
+		    "RFF_ID issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2048,
+		    "RFF_ID exiting normally.\n");
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rnn_id(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return qla2x00_sns_rnn_id(vha);
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = RNN_ID_REQ_SIZE;
+	arg.rsp_size = RNN_ID_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	/* Issue RNN_ID */
+	/* Prepare common MS IOCB */
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare CT arguments -- port_id, node_name */
+	ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
+	ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
+	ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
+
+	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x204d,
+		    "RNN_ID issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x204e,
+		    "RNN_ID exiting normally.\n");
+	}
+
+	return (rval);
+}
+
+void
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLAFX00(ha))
+		snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
+		    ha->mr.fw_version, qla2x00_version_str);
+	else
+		snprintf(snn, size,
+		    "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+		    ha->fw_major_version, ha->fw_minor_version,
+		    ha->fw_subminor_version, qla2x00_version_str);
+}
+
+/**
+ * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_rsnn_nn(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		ql_dbg(ql_dbg_disc, vha, 0x2050,
+		    "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
+		return (QLA_SUCCESS);
+	}
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = 0;
+	arg.rsp_size = RSNN_NN_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
+	/* Issue RSNN_NN */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
+	    RSNN_NN_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare CT arguments -- node_name, symbolic node_name, size */
+	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
+
+	/* Prepare the Symbolic Node Name */
+	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
+	    sizeof(ct_req->req.rsnn_nn.sym_node_name));
+
+	/* Calculate SNN length */
+	ct_req->req.rsnn_nn.name_len =
+	    (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
+
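+	/* Request = 16-byte CT header + 8-byte node name + 1-byte name
+	 * length + the symbolic name itself (24 + 1 + name_len). */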
+	/* Update MS IOCB request */
+	ms_pkt->req_bytecount =
+	    cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
+	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2051,
+		    "RSNN_NN issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2052,
+		    "RSNN_NN exiting normally.\n");
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
+ * @vha: HA context
+ * @cmd: GS command
+ * @scmd_len: Subcommand length
+ * @data_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's sns_cmd.
+ */
+static inline struct sns_cmd_pkt *
+qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
+    uint16_t data_size)
+{
+	uint16_t		wc;
+	struct sns_cmd_pkt	*sns_cmd;
+	struct qla_hw_data *ha = vha->hw;
+
+	sns_cmd = ha->sns_cmd;
+	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
+	wc = data_size / 2;			/* Size in 16bit words. */
+	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
+	sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
+	sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
+	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
+	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
+	wc = (data_size - 16) / 4;		/* Size in 32bit words. */
+	sns_cmd->p.cmd.size = cpu_to_le16(wc);
+
+	vha->qla_stats.control_requests++;
+
+	return (sns_cmd);
+}
+
+/**
+ * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
+ * @vha: HA context
+ * @fcport: fcport entry to be updated
+ *
+ * This command uses the old Execute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int		rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	struct sns_cmd_pkt	*sns_cmd;
+
+	/* Issue GA_NXT. */
+	/* Prepare SNS command request. */
+	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
+	    GA_NXT_SNS_DATA_SIZE);
+
+	/* Prepare SNS command arguments -- port_id. */
+	sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
+
+	/* Execute SNS command. */
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
+	    sizeof(struct sns_cmd_pkt));
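+	/* Bytes 8-9 of the response carry the CT response code;
+	 * 0x8002 is Accept, anything else is treated as a reject. */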
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x205f,
+		    "GA_NXT Send SNS failed (%d).\n", rval);
+	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
+	    sns_cmd->p.gan_data[9] != 0x02) {
+		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
+		    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
+		    sns_cmd->p.gan_data, 16);
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		/* Populate fc_port_t entry. */
+		fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
+		fcport->d_id.b.area = sns_cmd->p.gan_data[18];
+		fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
+
+		memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
+		memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
+
+		if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
+		    sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
+			fcport->d_id.b.domain = 0xf0;
+
+		ql_dbg(ql_dbg_disc, vha, 0x2061,
+		    "GA_NXT entry - nn %8phN pn %8phN "
+		    "port_id=%02x%02x%02x.\n",
+		    fcport->node_name, fcport->port_name,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa);
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * This command uses the old Execute SNS Command mailbox routine.
+ *
+ * NOTE: Non-Nx_Ports are not requested.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t	i;
+	uint8_t		*entry;
+	struct sns_cmd_pkt	*sns_cmd;
+	uint16_t gid_pt_sns_data_size;
+
+	gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
+
+	/* Issue GID_PT. */
+	/* Prepare SNS command request. */
+	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
+	    gid_pt_sns_data_size);
+
+	/* Prepare SNS command arguments -- port_type. */
+	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
+
+	/* Execute SNS command. */
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
+	    sizeof(struct sns_cmd_pkt));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x206d,
+		    "GID_PT Send SNS failed (%d).\n", rval);
+	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
+	    sns_cmd->p.gid_data[9] != 0x02) {
+		ql_dbg(ql_dbg_disc, vha, 0x202f,
+		    "GID_PT failed, rejected request, gid_rsp:\n");
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
+		    sns_cmd->p.gid_data, 16);
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		/* Set port IDs in switch info list. */
+		for (i = 0; i < ha->max_fibre_devices; i++) {
+			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
+			list[i].d_id.b.domain = entry[1];
+			list[i].d_id.b.area = entry[2];
+			list[i].d_id.b.al_pa = entry[3];
+
+			/* Last one exit. */
+			if (entry[0] & BIT_7) {
+				list[i].d_id.b.rsvd_1 = entry[0];
+				break;
+			}
+		}
+
+		/*
+		 * If we've used all available slots, then the switch is
+		 * reporting back more devices than we can handle with this
+		 * single call.  Return a failed status, and let GA_NXT handle
+		 * the overload.
+		 */
+		if (i == ha->max_fibre_devices)
+			rval = QLA_FUNCTION_FAILED;
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * This command uses the old Execute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t	i;
+	struct sns_cmd_pkt	*sns_cmd;
+
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/* Issue GPN_ID */
+		/* Prepare SNS command request. */
+		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
+		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
+
+		/* Prepare SNS command arguments -- port_id. */
+		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
+		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
+		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
+
+		/* Execute SNS command. */
+		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
+		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
+		if (rval != QLA_SUCCESS) {
+			/*EMPTY*/
+			ql_dbg(ql_dbg_disc, vha, 0x2032,
+			    "GPN_ID Send SNS failed (%d).\n", rval);
+		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
+		    sns_cmd->p.gpn_data[9] != 0x02) {
+			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
+			    "GPN_ID failed, rejected request, gpn_rsp:\n");
+			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
+			    sns_cmd->p.gpn_data, 16);
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			/* Save portname */
+			memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
+			    WWN_SIZE);
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * This command uses the old Execute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t	i;
+	struct sns_cmd_pkt	*sns_cmd;
+
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/* Issue GNN_ID */
+		/* Prepare SNS command request. */
+		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
+		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
+
+		/* Prepare SNS command arguments -- port_id. */
+		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
+		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
+		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
+
+		/* Execute SNS command. */
+		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
+		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
+		if (rval != QLA_SUCCESS) {
+			/*EMPTY*/
+			ql_dbg(ql_dbg_disc, vha, 0x203f,
+			    "GNN_ID Send SNS failed (%d).\n", rval);
+		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
+		    sns_cmd->p.gnn_data[9] != 0x02) {
+			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
+			    "GNN_ID failed, rejected request, gnn_rsp:\n");
+			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
+			    sns_cmd->p.gnn_data, 16);
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			/* Save nodename */
+			memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
+			    WWN_SIZE);
+
+			ql_dbg(ql_dbg_disc, vha, 0x206e,
+			    "GID_PT entry - nn %8phN pn %8phN "
+			    "port_id=%02x%02x%02x.\n",
+			    list[i].node_name, list[i].port_name,
+			    list[i].d_id.b.domain, list[i].d_id.b.area,
+			    list[i].d_id.b.al_pa);
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
+ * @vha: HA context
+ *
+ * This command uses the old Execute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_rft_id(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct sns_cmd_pkt	*sns_cmd;
+
+	/* Issue RFT_ID. */
+	/* Prepare SNS command request. */
+	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
+	    RFT_ID_SNS_DATA_SIZE);
+
+	/* Prepare SNS command arguments -- port_id, FC-4 types */
+	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
+
+	sns_cmd->p.cmd.param[5] = 0x01;			/* FCP-3 */
+
+	/* Execute SNS command. */
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
+	    sizeof(struct sns_cmd_pkt));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2060,
+		    "RFT_ID Send SNS failed (%d).\n", rval);
+	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
+	    sns_cmd->p.rft_data[9] != 0x02) {
+		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
+		    "RFT_ID failed, rejected request rft_rsp:\n");
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
+		    sns_cmd->p.rft_data, 16);
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2073,
+		    "RFT_ID exiting normally.\n");
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
+ * @vha: HA context
+ *
+ * This command uses the old Execute SNS Command mailbox routine.
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct sns_cmd_pkt	*sns_cmd;
+
+	/* Issue RNN_ID. */
+	/* Prepare SNS command request. */
+	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
+	    RNN_ID_SNS_DATA_SIZE);
+
+	/* Prepare SNS command arguments -- port_id, nodename. */
+	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
+
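+	/* The node name is loaded least-significant byte first, matching
+	 * the parameter byte order used for the port ID above. */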
+	sns_cmd->p.cmd.param[4] = vha->node_name[7];
+	sns_cmd->p.cmd.param[5] = vha->node_name[6];
+	sns_cmd->p.cmd.param[6] = vha->node_name[5];
+	sns_cmd->p.cmd.param[7] = vha->node_name[4];
+	sns_cmd->p.cmd.param[8] = vha->node_name[3];
+	sns_cmd->p.cmd.param[9] = vha->node_name[2];
+	sns_cmd->p.cmd.param[10] = vha->node_name[1];
+	sns_cmd->p.cmd.param[11] = vha->node_name[0];
+
+	/* Execute SNS command. */
+	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
+	    sizeof(struct sns_cmd_pkt));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x204a,
+		    "RNN_ID Send SNS failed (%d).\n", rval);
+	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
+	    sns_cmd->p.rnn_data[9] != 0x02) {
+		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
+		    "RNN_ID failed, rejected request, rnn_rsp:\n");
+		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
+		    sns_cmd->p.rnn_data, 16);
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x204c,
+		    "RNN_ID exiting normally.\n");
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
+{
+	int ret, rval;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
+
+	ret = QLA_SUCCESS;
+	if (vha->flags.management_server_logged_in)
+		return ret;
+
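+	/* The management server sits at well-known address FF FF FA. */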
+	rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
+	    0xfa, mb, BIT_1);
+	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+		if (rval == QLA_MEMORY_ALLOC_FAILED)
+			ql_dbg(ql_dbg_disc, vha, 0x2085,
+			    "Failed management_server login: loopid=%x "
+			    "rval=%d\n", vha->mgmt_svr_loop_id, rval);
+		else
+			ql_dbg(ql_dbg_disc, vha, 0x2024,
+			    "Failed management_server login: loopid=%x "
+			    "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+			    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
+			    mb[7]);
+		ret = QLA_FUNCTION_FAILED;
+	} else
+		vha->flags.management_server_logged_in = 1;
+
+	return ret;
+}
+
+/**
+ * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
+ * @vha: HA context
+ * @req_size: request size in bytes
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's ms_iocb.
+ */
+void *
+qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
+    uint32_t rsp_size)
+{
+	ms_iocb_entry_t *ms_pkt;
+	struct qla_hw_data *ha = vha->hw;
+
+	ms_pkt = ha->ms_iocb;
+	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
+
+	ms_pkt->entry_type = MS_IOCB_TYPE;
+	ms_pkt->entry_count = 1;
+	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
+	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
+	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ms_pkt->total_dsd_count = cpu_to_le16(2);
+	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
+	ms_pkt->req_bytecount = cpu_to_le32(req_size);
+
+	ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+	ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+	ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+
+	ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+	ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+	ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+
+	return ms_pkt;
+}
+
+/**
+ * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
+ * @vha: HA context
+ * @req_size: request size in bytes
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the @ha's ms_iocb.
+ */
+void *
+qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
+    uint32_t rsp_size)
+{
+	struct ct_entry_24xx *ct_pkt;
+	struct qla_hw_data *ha = vha->hw;
+
+	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
+
+	ct_pkt->entry_type = CT_IOCB_TYPE;
+	ct_pkt->entry_count = 1;
+	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
+	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
+	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+
+	ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+	ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+	ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+
+	ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
+	ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+	ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+	ct_pkt->vp_index = vha->vp_idx;
+
+	return ct_pkt;
+}
+
+static inline ms_iocb_entry_t *
+qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
+{
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
+	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+
+	if (IS_FWI2_CAPABLE(ha)) {
+		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+		ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
+	} else {
+		ms_pkt->req_bytecount = cpu_to_le32(req_size);
+		ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
+	}
+
+	return ms_pkt;
+}
+
+/**
+ * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
+ * @p: CT request buffer
+ * @cmd: GS command
+ * @rsp_size: response size in bytes
+ *
+ * Returns a pointer to the initialized CT request.
+ */
+static inline struct ct_sns_req *
+qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
+    uint16_t rsp_size)
+{
+	memset(p, 0, sizeof(struct ct_sns_pkt));
+
+	p->p.req.header.revision = 0x01;
+	p->p.req.header.gs_type = 0xFA;
+	p->p.req.header.gs_subtype = 0x10;
+	p->p.req.command = cpu_to_be16(cmd);
+	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
+
+	return &p->p.req;
+}
+
+/**
+ * qla2x00_fdmi_rhba() - Register HBA attributes (RHBA) with the FDMI server.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
+{
+	int rval, alen;
+	uint32_t size, sn;
+
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+	void *entries;
+	struct ct_fdmi_hba_attr *eiter;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Issue RHBA */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- attribute block, attributes. */
+	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
+	ct_req->req.rhba.entry_count = cpu_to_be32(1);
+	memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
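+	/* Fixed part: HBA identifier + registered-port count + port name
+	 * + attribute block count. */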
+	size = 2 * WWN_SIZE + 4 + 4;
+
+	/* Attributes */
+	ct_req->req.rhba.attrs.count =
+	    cpu_to_be32(FDMI_HBA_ATTR_COUNT);
+	entries = ct_req->req.rhba.hba_identifier;
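+	/* Attribute entries are appended at entries + size; size tracks
+	 * the running payload length. */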
+
+	/* Nodename. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2025,
+	    "NodeName = %8phN.\n", eiter->a.node_name);
+
+	/* Manufacturer. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
+	snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+	    "%s", "QLogic Corporation");
+	alen = strlen(eiter->a.manufacturer);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2026,
+	    "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+	/* Serial number. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+	if (IS_FWI2_CAPABLE(ha))
+		qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+		    sizeof(eiter->a.serial_num));
+	else {
+		sn = ((ha->serial0 & 0x1f) << 16) |
+			(ha->serial2 << 8) | ha->serial1;
+		snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+		    "%c%05d", 'A' + sn / 100000, sn % 100000);
+	}
+	alen = strlen(eiter->a.serial_num);
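+	/* Round the attribute length up to a 4-byte boundary; a full pad
+	 * word is added when already aligned. */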
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2027,
+	    "Serial no. = %s.\n", eiter->a.serial_num);
+
+	/* Model name. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
+	snprintf(eiter->a.model, sizeof(eiter->a.model),
+	    "%s", ha->model_number);
+	alen = strlen(eiter->a.model);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2028,
+	    "Model Name = %s.\n", eiter->a.model);
+
+	/* Model description. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+	snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+	    "%s", ha->model_desc);
+	alen = strlen(eiter->a.model_desc);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2029,
+	    "Model Desc = %s.\n", eiter->a.model_desc);
+
+	/* Hardware version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+	if (!IS_FWI2_CAPABLE(ha)) {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	} else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	}
+	alen = strlen(eiter->a.hw_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x202a,
+	    "Hardware ver = %s.\n", eiter->a.hw_version);
+
+	/* Driver version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+	snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+	    "%s", qla2x00_version_str);
+	alen = strlen(eiter->a.driver_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x202b,
+	    "Driver ver = %s.\n", eiter->a.driver_version);
+
+	/* Option ROM version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+	snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+	alen = strlen(eiter->a.orom_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x202c,
+	    "Optrom vers = %s.\n", eiter->a.orom_version);
+
+	/* Firmware version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+	    sizeof(eiter->a.fw_version));
+	alen = strlen(eiter->a.fw_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x202d,
+	    "Firmware vers = %s.\n", eiter->a.fw_version);
+
+	/* Update MS request size. */
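+	/*   (size covers the attribute payload only; 16 adds the CT header) */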
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+	ql_dbg(ql_dbg_disc, vha, 0x202e,
+	    "RHBA identifier = %8phN size=%d.\n",
+	    ct_req->req.rhba.hba_identifier, size);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
+	    entries, size);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2030,
+		    "RHBA issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x2034,
+			    "HBA already registered.\n");
+			rval = QLA_ALREADY_REGISTERED;
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20ad,
+			    "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+			    ct_rsp->header.reason_code,
+			    ct_rsp->header.explanation_code);
+		}
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2035,
+		    "RHBA exiting normally.\n");
+	}
+
+	return rval;
+}
+
+/**
+ * qla2x00_fdmi_rpa() - Register port attributes (RPA) with the FDMI server.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+{
+	int rval, alen;
+	uint32_t size;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+	void *entries;
+	struct ct_fdmi_port_attr *eiter;
+	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+	struct new_utsname *p_sysid = NULL;
+
+	/* Issue RPA */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
+	    RPA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- attribute block, attributes. */
+	memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+	size = WWN_SIZE + 4;
+
+	/* Attributes */
+	ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
+	entries = ct_req->req.rpa.port_name;
+
+	/* FC4 types. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+	eiter->len = cpu_to_be16(4 + 32);
+	eiter->a.fc4_types[2] = 0x01;
+	size += 4 + 32;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2039,
+	    "FC4_TYPES=%02x %02x.\n",
+	    eiter->a.fc4_types[2],
+	    eiter->a.fc4_types[1]);
+
+	/* Supported speed. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
+	if (IS_CNA_CAPABLE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA27XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB);
+	else if (IS_QLA2031(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB);
+	else if (IS_QLA25XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA24XX_TYPE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA23XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203a,
+	    "Supported_Speed=%x.\n", eiter->a.sup_speed);
+
+	/* Current speed. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
+	switch (ha->link_data_rate) {
+	case PORT_SPEED_1GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_1GB);
+		break;
+	case PORT_SPEED_2GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_2GB);
+		break;
+	case PORT_SPEED_4GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_4GB);
+		break;
+	case PORT_SPEED_8GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_8GB);
+		break;
+	case PORT_SPEED_10GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_10GB);
+		break;
+	case PORT_SPEED_16GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_16GB);
+		break;
+	case PORT_SPEED_32GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		break;
+	default:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+		break;
+	}
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203b,
+	    "Current_Speed=%x.\n", eiter->a.cur_speed);
+
+	/* Max frame size. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+	eiter->len = cpu_to_be16(4 + 4);
+	eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+	    le16_to_cpu(icb24->frame_payload_size) :
+	    le16_to_cpu(ha->init_cb->frame_payload_size);
+	eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203c,
+	    "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+
+	/* OS device name. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+	snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+	    "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+	alen = strlen(eiter->a.os_dev_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x204b,
+	    "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+
+	/* Hostname. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+	p_sysid = utsname();
+	if (p_sysid) {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", p_sysid->nodename);
+	} else {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", fc_host_system_hostname(vha->host));
+	}
+	alen = strlen(eiter->a.host_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
+
+	/* Update MS request size. */
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+	ql_dbg(ql_dbg_disc, vha, 0x203e,
+	    "RPA portname  %016llx, size = %d.\n",
+	    wwn_to_u64(ct_req->req.rpa.port_name), size);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+	    entries, size);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2040,
+		    "RPA issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x20cd,
+			    "RPA already registered.\n");
+			rval = QLA_ALREADY_REGISTERED;
+		}
+
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2041,
+		    "RPA exiting normally.\n");
+	}
+
+	return rval;
+}
+
+/**
+ * qla2x00_fdmiv2_rhba() - Register HBA attributes (FDMI-2 RHBA) with the FDMI server.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
+{
+	int rval, alen;
+	uint32_t size, sn;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+	void *entries;
+	struct ct_fdmiv2_hba_attr *eiter;
+	struct qla_hw_data *ha = vha->hw;
+	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+	struct new_utsname *p_sysid = NULL;
+
+	/* Issue RHBA */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
+	    RHBA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- attribute block, attributes. */
+	memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
+	ct_req->req.rhba2.entry_count = cpu_to_be32(1);
+	memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
+	size = 2 * WWN_SIZE + 4 + 4;
+
+	/* Attributes */
+	ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
+	entries = ct_req->req.rhba2.hba_identifier;
+
+	/* Nodename. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x207d,
+	    "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+	/* Manufacturer. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
+	snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+	    "%s", "QLogic Corporation");
+	alen = strlen(eiter->a.manufacturer);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a5,
+	    "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+	/* Serial number. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+	if (IS_FWI2_CAPABLE(ha))
+		qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+		    sizeof(eiter->a.serial_num));
+	else {
+		sn = ((ha->serial0 & 0x1f) << 16) |
+			(ha->serial2 << 8) | ha->serial1;
+		snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+		    "%c%05d", 'A' + sn / 100000, sn % 100000);
+	}
+	alen = strlen(eiter->a.serial_num);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a6,
+	    "Serial no. = %s.\n", eiter->a.serial_num);
+
+	/* Model name. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
+	snprintf(eiter->a.model, sizeof(eiter->a.model),
+	    "%s", ha->model_number);
+	alen = strlen(eiter->a.model);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a7,
+	    "Model Name = %s.\n", eiter->a.model);
+
+	/* Model description. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+	snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+	    "%s", ha->model_desc);
+	alen = strlen(eiter->a.model_desc);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a8,
+	    "Model Desc = %s.\n", eiter->a.model_desc);
+
+	/* Hardware version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+	if (!IS_FWI2_CAPABLE(ha)) {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	} else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	}
+	alen = strlen(eiter->a.hw_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a9,
+	    "Hardware ver = %s.\n", eiter->a.hw_version);
+
+	/* Driver version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+	snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+	    "%s", qla2x00_version_str);
+	alen = strlen(eiter->a.driver_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20aa,
+	    "Driver ver = %s.\n", eiter->a.driver_version);
+
+	/* Option ROM version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+	snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+	alen = strlen(eiter->a.orom_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ab,
+	    "Optrom version = %s.\n", eiter->a.orom_version);
+
+	/* Firmware version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+	    sizeof(eiter->a.fw_version));
+	alen = strlen(eiter->a.fw_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ac,
+	    "Firmware vers = %s.\n", eiter->a.fw_version);
+
+	/* OS Name and Version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+	p_sysid = utsname();
+	if (p_sysid) {
+		snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+		    "%s %s %s",
+		    p_sysid->sysname, p_sysid->release, p_sysid->version);
+	} else {
+		snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+		    "%s %s", "Linux", fc_host_system_hostname(vha->host));
+	}
+	alen = strlen(eiter->a.os_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ae,
+	    "OS Name and Version = %s.\n", eiter->a.os_version);
+
+	/* MAX CT Payload Length */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+	eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
+	    le16_to_cpu(icb24->frame_payload_size) :
+	    le16_to_cpu(ha->init_cb->frame_payload_size);
+	eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20af,
+	    "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+
+	/* Node Symbolic Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+	qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+	    sizeof(eiter->a.sym_name));
+	alen = strlen(eiter->a.sym_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b0,
+	    "Symbolic Name = %s.\n", eiter->a.sym_name);
+
+	/* Vendor Id */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
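+	/* 0x1077 is the QLogic PCI vendor ID. */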
+	eiter->a.vendor_id = cpu_to_be32(0x1077);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b1,
+	    "Vendor Id = %x.\n", eiter->a.vendor_id);
+
+	/* Num Ports */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+	eiter->a.num_ports = cpu_to_be32(1);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b2,
+	    "Port Num = %x.\n", eiter->a.num_ports);
+
+	/* Fabric Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+	memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b3,
+	    "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+	/* BIOS Version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+	snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
+	    "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+	alen = strlen(eiter->a.bios_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b4,
+	    "BIOS Name = %s\n", eiter->a.bios_name);
+
+	/* Vendor Identifier */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
+	snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
+	    "%s", "QLGC");
+	alen = strlen(eiter->a.vendor_identifier);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x201b,
+	    "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
+
+	/* Update MS request size. */
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b5,
+	    "RHBA identifier = %016llx.\n",
+	    wwn_to_u64(ct_req->req.rhba2.hba_identifier));
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+	    entries, size);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x20b7,
+		    "RHBA issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x20b8,
+			    "HBA already registered.\n");
+			rval = QLA_ALREADY_REGISTERED;
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x2016,
+			    "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+			    ct_rsp->header.reason_code,
+			    ct_rsp->header.explanation_code);
+		}
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x20b9,
+		    "RHBA FDMI V2 exiting normally.\n");
+	}
+
+	return rval;
+}
+
+/**
+ * qla2x00_fdmi_dhba() - Deregister HBA attributes (DHBA) from the FDMI server.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+
+	/* Issue DHBA */
+	/* Prepare common MS IOCB */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
+	    DHBA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- portname. */
+	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
+
+	ql_dbg(ql_dbg_disc, vha, 0x2036,
+	    "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2037,
+		    "DHBA issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2038,
+		    "DHBA exiting normally.\n");
+	}
+
+	return rval;
+}
+
+/**
+ * qla2x00_fdmiv2_rpa() - Register port attributes (FDMI-2 RPA) with the FDMI server.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
+{
+	int rval, alen;
+	uint32_t size;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+	void *entries;
+	struct ct_fdmiv2_port_attr *eiter;
+	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+	struct new_utsname *p_sysid = NULL;
+
+	/* Issue RPA */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- attribute block, attributes. */
+	memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
+	size = WWN_SIZE + 4;
+
+	/* Attributes */
+	ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
+	entries = ct_req->req.rpa2.port_name;
+
+	/* FC4 types. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+	eiter->len = cpu_to_be16(4 + 32);
+	eiter->a.fc4_types[2] = 0x01;
+	size += 4 + 32;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ba,
+	    "FC4_TYPES=%02x %02x.\n",
+	    eiter->a.fc4_types[2],
+	    eiter->a.fc4_types[1]);
+
+	if (vha->flags.nvme_enabled) {
+		eiter->a.fc4_types[6] = 1;	/* NVMe type 28h */
+		ql_dbg(ql_dbg_disc, vha, 0x211f,
+		    "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+		    eiter->a.fc4_types[6]);
+	}
+
+	/* Supported speed. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
+	if (IS_CNA_CAPABLE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA27XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB);
+	else if (IS_QLA2031(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB);
+	else if (IS_QLA25XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA24XX_TYPE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA23XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20bb,
+	    "Supported Port Speed = %x.\n", eiter->a.sup_speed);
+
+	/* Current speed. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
+	switch (ha->link_data_rate) {
+	case PORT_SPEED_1GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
+		break;
+	case PORT_SPEED_2GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
+		break;
+	case PORT_SPEED_4GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
+		break;
+	case PORT_SPEED_8GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
+		break;
+	case PORT_SPEED_10GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
+		break;
+	case PORT_SPEED_16GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
+		break;
+	case PORT_SPEED_32GB:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		break;
+	default:
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+		break;
+	}
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2017,
+	    "Current_Speed = %x.\n", eiter->a.cur_speed);
+
+	/* Max frame size. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+	eiter->len = cpu_to_be16(4 + 4);
+	eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+	    le16_to_cpu(icb24->frame_payload_size) :
+	    le16_to_cpu(ha->init_cb->frame_payload_size);
+	eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20bc,
+	    "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
+
+	/* OS device name. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+	snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+	    "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+	alen = strlen(eiter->a.os_dev_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20be,
+	    "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
+
+	/* Hostname. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+	p_sysid = utsname();
+	if (p_sysid) {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", p_sysid->nodename);
+	} else {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", fc_host_system_hostname(vha->host));
+	}
+	alen = strlen(eiter->a.host_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x201a,
+	    "HostName=%s.\n", eiter->a.host_name);
+
+	/* Node Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c0,
+	    "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+	/* Port Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+	memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c1,
+	    "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
+
+	/* Port Symbolic Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+	qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+	    sizeof(eiter->a.port_sym_name));
+	alen = strlen(eiter->a.port_sym_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c2,
+	    "port symbolic name = %s\n", eiter->a.port_sym_name);
+
+	/* Port Type */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+	eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c3,
+	    "Port Type = %x.\n", be32_to_cpu(eiter->a.port_type));
+
+	/* Class of Service  */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+	eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c4,
+	    "Supported COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
+
+	/* Port Fabric Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+	memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c5,
+	    "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+	/* FC4_type */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+	eiter->a.port_fc4_type[0] = 0;
+	eiter->a.port_fc4_type[1] = 0;
+	eiter->a.port_fc4_type[2] = 1;
+	eiter->a.port_fc4_type[3] = 0;
+	eiter->len = cpu_to_be16(4 + 32);
+	size += 4 + 32;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c6,
+	    "Port Active FC4 Type = %02x %02x.\n",
+	    eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+
+	if (vha->flags.nvme_enabled) {
+		eiter->a.port_fc4_type[4] = 0;
+		eiter->a.port_fc4_type[5] = 0;
+		eiter->a.port_fc4_type[6] = 1;	/* NVMe type 28h */
+		ql_dbg(ql_dbg_disc, vha, 0x2120,
+		    "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+		    eiter->a.port_fc4_type[6]);
+	}
+
+	/* Port State */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+	eiter->a.port_state = cpu_to_be32(1);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c7,
+	    "Port State = %x.\n", be32_to_cpu(eiter->a.port_state));
+
+	/* Number of Ports */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+	eiter->a.num_ports = cpu_to_be32(1);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c8,
+	    "Number of ports = %x.\n", be32_to_cpu(eiter->a.num_ports));
+
+	/* Port Id */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_ID);
+	eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x201c,
+	    "Port Id = %x.\n", be32_to_cpu(eiter->a.port_id));
+
+	/* Update MS request size. */
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+	ql_dbg(ql_dbg_disc, vha, 0x2018,
+	    "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
+	    entries, size);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x20cb,
+		    "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x20ce,
+			    "RPA FDMI v2 already registered\n");
+			rval = QLA_ALREADY_REGISTERED;
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x2020,
+			    "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+			    ct_rsp->header.reason_code,
+			    ct_rsp->header.explanation_code);
+		}
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x20cc,
+		    "RPA FDMI V2 exiting normally.\n");
+	}
+
+	return rval;
+}
+
+/**
+ * qla2x00_fdmi_register() - Register HBA and port attributes with the fabric.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_fdmi_register(scsi_qla_host_t *vha)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
+	    IS_QLAFX00(ha))
+		return QLA_FUNCTION_FAILED;
+
+	rval = qla2x00_mgmt_svr_login(vha);
+	if (rval)
+		return rval;
+
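+	/* Prefer FDMI-2 registration; fall back to FDMI-1 on failure. */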
+	rval = qla2x00_fdmiv2_rhba(vha);
+	if (rval) {
+		if (rval != QLA_ALREADY_REGISTERED)
+			goto try_fdmi;
+
+		rval = qla2x00_fdmi_dhba(vha);
+		if (rval)
+			goto try_fdmi;
+
+		rval = qla2x00_fdmiv2_rhba(vha);
+		if (rval)
+			goto try_fdmi;
+	}
+	rval = qla2x00_fdmiv2_rpa(vha);
+	if (rval)
+		goto try_fdmi;
+
+	goto out;
+
+try_fdmi:
+	rval = qla2x00_fdmi_rhba(vha);
+	if (rval) {
+		if (rval != QLA_ALREADY_REGISTERED)
+			return rval;
+
+		rval = qla2x00_fdmi_dhba(vha);
+		if (rval)
+			return rval;
+
+		rval = qla2x00_fdmi_rhba(vha);
+		if (rval)
+			return rval;
+	}
+	rval = qla2x00_fdmi_rpa(vha);
+out:
+	return rval;
+}
+
+/**
+ * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval = QLA_SUCCESS;
+	uint16_t	i;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (!IS_IIDMA_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = GFPN_ID_REQ_SIZE;
+	arg.rsp_size = GFPN_ID_RSP_SIZE;
+	arg.nport_handle = NPH_SNS;
+
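+	/* Ask the name server for each discovered port's fabric WWPN. */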
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/* Issue GFPN_ID */
+		/* Prepare common MS IOCB */
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+		/* Prepare CT request */
+		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
+		    GFPN_ID_RSP_SIZE);
+		ct_rsp = &ha->ct_sns->p.rsp;
+
+		/* Prepare CT arguments -- port_id */
+		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+		/* Execute MS IOCB */
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+		    sizeof(ms_iocb_entry_t));
+		if (rval != QLA_SUCCESS) {
+			/*EMPTY*/
+			ql_dbg(ql_dbg_disc, vha, 0x2023,
+			    "GFPN_ID issue IOCB failed (%d).\n", rval);
+			break;
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+		    "GFPN_ID") != QLA_SUCCESS) {
+			rval = QLA_FUNCTION_FAILED;
+			break;
+		} else {
+			/* Save fabric portname */
+			memcpy(list[i].fabric_port_name,
+			    ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+
+	return rval;
+}
+
+
+static inline struct ct_sns_req *
+qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
+    uint16_t rsp_size)
+{
+	memset(p, 0, sizeof(struct ct_sns_pkt));
+
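+	/* CT preamble addressed to the Fabric Management service (0xFA). */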
+	p->p.req.header.revision = 0x01;
+	p->p.req.header.gs_type = 0xFA;
+	p->p.req.header.gs_subtype = 0x01;
+	p->p.req.command = cpu_to_be16(cmd);
+	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
+
+	return &p->p.req;
+}
+
+/**
+ * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval;
+	uint16_t	i;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct ct_arg arg;
+
+	if (!IS_IIDMA_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+	if (!ha->flags.gpsc_supported)
+		return QLA_FUNCTION_FAILED;
+
+	rval = qla2x00_mgmt_svr_login(vha);
+	if (rval)
+		return rval;
+
+	arg.iocb = ha->ms_iocb;
+	arg.req_dma = ha->ct_sns_dma;
+	arg.rsp_dma = ha->ct_sns_dma;
+	arg.req_size = GPSC_REQ_SIZE;
+	arg.rsp_size = GPSC_RSP_SIZE;
+	arg.nport_handle = vha->mgmt_svr_loop_id;
+
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/* Issue GPSC */
+		/* Prepare common MS IOCB */
+		ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
+
+		/* Prepare CT request */
+		ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
+		    GPSC_RSP_SIZE);
+		ct_rsp = &ha->ct_sns->p.rsp;
+
+		/* Prepare CT arguments -- port_name */
+		memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
+		    WWN_SIZE);
+
+		/* Execute MS IOCB */
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+		    sizeof(ms_iocb_entry_t));
+		if (rval != QLA_SUCCESS) {
+			/*EMPTY*/
+			ql_dbg(ql_dbg_disc, vha, 0x2059,
+			    "GPSC issue IOCB failed (%d).\n", rval);
+		} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+		    "GPSC")) != QLA_SUCCESS) {
+			/* FM command unsupported? */
+			if (rval == QLA_INVALID_COMMAND &&
+			    (ct_rsp->header.reason_code ==
+				CT_REASON_INVALID_COMMAND_CODE ||
+			     ct_rsp->header.reason_code ==
+				CT_REASON_COMMAND_UNSUPPORTED)) {
+				ql_dbg(ql_dbg_disc, vha, 0x205a,
+				    "GPSC command unsupported, disabling query.\n");
+				ha->flags.gpsc_supported = 0;
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			/* Save port-speed */
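+			/*
+			 * The GPSC response reports speed as a bit mask;
+			 * translate it to the driver's PORT_SPEED_*
+			 * encoding.
+			 */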
+			switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+			case BIT_15:
+				list[i].fp_speed = PORT_SPEED_1GB;
+				break;
+			case BIT_14:
+				list[i].fp_speed = PORT_SPEED_2GB;
+				break;
+			case BIT_13:
+				list[i].fp_speed = PORT_SPEED_4GB;
+				break;
+			case BIT_12:
+				list[i].fp_speed = PORT_SPEED_10GB;
+				break;
+			case BIT_11:
+				list[i].fp_speed = PORT_SPEED_8GB;
+				break;
+			case BIT_10:
+				list[i].fp_speed = PORT_SPEED_16GB;
+				break;
+			case BIT_8:
+				list[i].fp_speed = PORT_SPEED_32GB;
+				break;
+			}
+
+			ql_dbg(ql_dbg_disc, vha, 0x205b,
+			    "GPSC ext entry - fpn "
+			    "%8phN speeds=%04x speed=%04x.\n",
+			    list[i].fabric_port_name,
+			    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+
+	return rval;
+}
+
+/**
+ * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
+ * @vha: HA context
+ * @list: switch info entries to populate
+ */
+void
+qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
+{
+	int		rval;
+	uint16_t	i;
+
+	ms_iocb_entry_t	*ms_pkt;
+	struct ct_sns_req	*ct_req;
+	struct ct_sns_rsp	*ct_rsp;
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t fcp_scsi_features = 0;
+	struct ct_arg arg;
+
+	for (i = 0; i < ha->max_fibre_devices; i++) {
+		/*
+		 * Default the FC-4 type to UNKNOWN so the port is still
+		 * processed when GFF_ID cannot be issued.
+		 */
+		list[i].fc4_type = FC4_TYPE_UNKNOWN;
+
+		/* Do not attempt GFF_ID if we are not FWI_2 capable */
+		if (!IS_FWI2_CAPABLE(ha))
+			continue;
+
+		arg.iocb = ha->ms_iocb;
+		arg.req_dma = ha->ct_sns_dma;
+		arg.rsp_dma = ha->ct_sns_dma;
+		arg.req_size = GFF_ID_REQ_SIZE;
+		arg.rsp_size = GFF_ID_RSP_SIZE;
+		arg.nport_handle = NPH_SNS;
+
+		/* Prepare common MS IOCB */
+		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+
+		/* Prepare CT request */
+		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
+		    GFF_ID_RSP_SIZE);
+		ct_rsp = &ha->ct_sns->p.rsp;
+
+		/* Prepare CT arguments -- port_id */
+		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
+		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
+		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+
+		/* Execute MS IOCB */
+		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+		   sizeof(ms_iocb_entry_t));
+
+		if (rval != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_disc, vha, 0x205c,
+			    "GFF_ID issue IOCB failed (%d).\n", rval);
+		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
+			       "GFF_ID") != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_disc, vha, 0x205d,
+			    "GFF_ID IOCB status had a failure status code.\n");
+		} else {
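+			/*
+			 * GFF_ID returns one 4-bit feature nibble per FC-4
+			 * type code: a nonzero nibble at the FCP offset marks
+			 * the port as FCP-SCSI, and the nibble at the NVMe
+			 * offset is saved for later PRLI decisions.
+			 */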
+			fcp_scsi_features =
+			   ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+			fcp_scsi_features &= 0x0f;
+
+			if (fcp_scsi_features)
+				list[i].fc4_type = FC4_TYPE_FCP_SCSI;
+			else
+				list[i].fc4_type = FC4_TYPE_OTHER;
+
+			list[i].fc4f_nvme =
+			    ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+			list[i].fc4f_nvme &= 0xf;
+		}
+
+		/* Last device exit. */
+		if (list[i].d_id.b.rsvd_1 != 0)
+			break;
+	}
+}
+
+/* GID_PN completion processing. */
+void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	fc_port_t *fcport = ea->fcport;
+
+	ql_dbg(ql_dbg_disc, vha, 0x201d,
+	    "%s %8phC login state %d\n",
+	    __func__, fcport->port_name, fcport->fw_login_state);
+
+	if (ea->sp->gen2 != fcport->login_gen) {
+		/* PLOGI/PRLI/LOGO came in while cmd was out. */
+		ql_dbg(ql_dbg_disc, vha, 0x201e,
+		    "%s %8phC generation changed rscn %d|%d login %d|%d\n",
+		    __func__, fcport->port_name, fcport->last_rscn_gen,
+		    fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+		return;
+	}
+
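+	/* rc == 0 means the name server resolved the WWPN to a port ID. */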
+	if (!ea->rc) {
+		if (ea->sp->gen1 == fcport->rscn_gen) {
+			fcport->scan_state = QLA_FCPORT_FOUND;
+			fcport->flags |= FCF_FABRIC_DEVICE;
+
+			if (fcport->d_id.b24 == ea->id.b24) {
+				/* cable plugged into the same place */
+				switch (vha->host->active_mode) {
+				case MODE_TARGET:
+					/* NOOP. Let the remote port log in to us. */
+					break;
+				case MODE_INITIATOR:
+				case MODE_DUAL:
+				default:
+					ql_dbg(ql_dbg_disc, vha, 0x201f,
+					    "%s %d %8phC post %s\n", __func__,
+					    __LINE__, fcport->port_name,
+					    (atomic_read(&fcport->state) ==
+					    FCS_ONLINE) ? "gpdb" : "gnl");
+
+					if (atomic_read(&fcport->state) ==
+					    FCS_ONLINE)
+						qla24xx_post_gpdb_work(vha,
+						    fcport, PDO_FORCE_ADISC);
+					else
+						qla24xx_post_gnl_work(vha,
+						    fcport);
+					break;
+				}
+			} else { /* fcport->d_id.b24 != ea->id.b24 */
+				fcport->d_id.b24 = ea->id.b24;
+				if (fcport->deleted != QLA_SESS_DELETED) {
+					ql_dbg(ql_dbg_disc, vha, 0x2021,
+					    "%s %d %8phC post del sess\n",
+					    __func__, __LINE__, fcport->port_name);
+					qlt_schedule_sess_for_deletion_lock(fcport);
+				}
+			}
+		} else { /* ea->sp->gen1 != fcport->rscn_gen */
+			ql_dbg(ql_dbg_disc, vha, 0x2022,
+			    "%s %d %8phC post gidpn\n",
+			    __func__, __LINE__, fcport->port_name);
+			/* rscn came in while cmd was out */
+			qla24xx_post_gidpn_work(vha, fcport);
+		}
+	} else { /* ea->rc */
+		/* cable pulled */
+		if (ea->sp->gen1 == fcport->rscn_gen) {
+			if (ea->sp->gen2 == fcport->login_gen) {
+				ql_dbg(ql_dbg_disc, vha, 0x2042,
+				    "%s %d %8phC post del sess\n", __func__,
+				    __LINE__, fcport->port_name);
+				qlt_schedule_sess_for_deletion_lock(fcport);
+			} else {
+				ql_dbg(ql_dbg_disc, vha, 0x2045,
+				    "%s %d %8phC login\n", __func__, __LINE__,
+				    fcport->port_name);
+				qla24xx_fcport_handle_login(vha, fcport);
+			}
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x2049,
+			    "%s %d %8phC post gidpn\n", __func__, __LINE__,
+			    fcport->port_name);
+			qla24xx_post_gidpn_work(vha, fcport);
+		}
+	}
+} /* gidpn_event */
+
+static void qla2x00_async_gidpn_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+	struct scsi_qla_host *vha = sp->vha;
+	fc_port_t *fcport = sp->fcport;
+	u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
+	struct event_arg ea;
+
+	fcport->flags &= ~FCF_ASYNC_SENT;
+
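+	/* Hand the result to the fcport event handler as FCME_GIDPN_DONE. */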
+	memset(&ea, 0, sizeof(ea));
+	ea.fcport = fcport;
+	ea.id.b.domain = id[0];
+	ea.id.b.area = id[1];
+	ea.id.b.al_pa = id[2];
+	ea.sp = sp;
+	ea.rc = res;
+	ea.event = FCME_GIDPN_DONE;
+
+	if (res == QLA_FUNCTION_TIMEOUT) {
+		ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+		    "Async done-%s WWPN %8phC timed out.\n",
+		    sp->name, fcport->port_name);
+		qla24xx_post_gidpn_work(sp->vha, fcport);
+		sp->free(sp);
+		return;
+	} else if (res) {
+		ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+		    "Async done-%s fail res %x, WWPN %8phC\n",
+		    sp->name, res, fcport->port_name);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x204f,
+		    "Async done-%s good WWPN %8phC ID %3phC\n",
+		    sp->name, fcport->port_name, id);
+	}
+
+	qla2x00_fcport_event_handler(vha, &ea);
+
+	sp->free(sp);
+}
+
+int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct ct_sns_req       *ct_req;
+	srb_t *sp;
+
+	if (!vha->flags.online)
+		goto done;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	fcport->disc_state = DSC_GID_PN;
+	fcport->scan_state = QLA_FCPORT_SCAN;
+	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_CT_PTHRU_CMD;
+	sp->name = "gidpn";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	/* CT_IU preamble  */
+	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
+		GID_PN_RSP_SIZE);
+
+	/* GIDPN req */
+	memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
+		WWN_SIZE);
+
+	/* req & rsp use the same buffer */
+	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
+	sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
+	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_gidpn_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a4,
+	    "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
+	    sp->name, fcport->port_name,
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_work_evt *e;
+	int ls;
+
+	ls = atomic_read(&vha->loop_state);
+	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+		test_bit(UNLOADING, &vha->dpc_flags))
+		return 0;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+	return qla2x00_post_work(vha, e);
+}
+
+int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+	return qla2x00_post_work(vha, e);
+}
+
+static void qla24xx_async_gpsc_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	fc_port_t *fcport = sp->fcport;
+	struct ct_sns_rsp       *ct_rsp;
+	struct event_arg ea;
+
+	ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2053,
+	    "Async done-%s res %x, WWPN %8phC\n",
+	    sp->name, res, fcport->port_name);
+
+	fcport->flags &= ~FCF_ASYNC_SENT;
+
+	if (res == (DID_ERROR << 16)) {
+		/* entry status error */
+		goto done;
+	} else if (res) {
+		if ((ct_rsp->header.reason_code ==
+			 CT_REASON_INVALID_COMMAND_CODE) ||
+			(ct_rsp->header.reason_code ==
+			CT_REASON_COMMAND_UNSUPPORTED)) {
+			ql_dbg(ql_dbg_disc, vha, 0x2019,
+			    "GPSC command unsupported, disabling query.\n");
+			ha->flags.gpsc_supported = 0;
+			goto done;
+		}
+	} else {
+		switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+		case BIT_15:
+			fcport->fp_speed = PORT_SPEED_1GB;
+			break;
+		case BIT_14:
+			fcport->fp_speed = PORT_SPEED_2GB;
+			break;
+		case BIT_13:
+			fcport->fp_speed = PORT_SPEED_4GB;
+			break;
+		case BIT_12:
+			fcport->fp_speed = PORT_SPEED_10GB;
+			break;
+		case BIT_11:
+			fcport->fp_speed = PORT_SPEED_8GB;
+			break;
+		case BIT_10:
+			fcport->fp_speed = PORT_SPEED_16GB;
+			break;
+		case BIT_8:
+			fcport->fp_speed = PORT_SPEED_32GB;
+			break;
+		}
+
+		ql_dbg(ql_dbg_disc, vha, 0x2054,
+		    "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
+		    sp->name, fcport->fabric_port_name,
+		    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+		    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+	}
+	memset(&ea, 0, sizeof(ea));
+	ea.event = FCME_GPSC_DONE;
+	ea.rc = res;
+	ea.fcport = fcport;
+	qla2x00_fcport_event_handler(vha, &ea);
+
+done:
+	sp->free(sp);
+}
+
+int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct ct_sns_req       *ct_req;
+	srb_t *sp;
+
+	if (!vha->flags.online)
+		goto done;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_CT_PTHRU_CMD;
+	sp->name = "gpsc";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	/* CT_IU preamble  */
+	ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
+		GPSC_RSP_SIZE);
+
+	/* GPSC req */
+	memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
+		WWN_SIZE);
+
+	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
+	sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
+	sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
+
+	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla24xx_async_gpsc_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x205e,
+	    "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+	    sp->name, fcport->port_name, sp->handle,
+	    fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
+{
+	struct qla_work_evt *e;
+
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return 0;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.gpnid.id = *id;
+	return qla2x00_post_work(vha, e);
+}
+
+void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+	if (sp->u.iocb_cmd.u.ctarg.req) {
+		dma_free_coherent(&vha->hw->pdev->dev,
+			sizeof(struct ct_sns_pkt),
+			sp->u.iocb_cmd.u.ctarg.req,
+			sp->u.iocb_cmd.u.ctarg.req_dma);
+		sp->u.iocb_cmd.u.ctarg.req = NULL;
+	}
+	if (sp->u.iocb_cmd.u.ctarg.rsp) {
+		dma_free_coherent(&vha->hw->pdev->dev,
+			sizeof(struct ct_sns_pkt),
+			sp->u.iocb_cmd.u.ctarg.rsp,
+			sp->u.iocb_cmd.u.ctarg.rsp_dma);
+		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+	}
+
+	sp->free(sp);
+}
+
+void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	fc_port_t *fcport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	if (fcport) {
+		/* cable moved. just plugged in */
+		fcport->rscn_gen++;
+		fcport->d_id = ea->id;
+		fcport->scan_state = QLA_FCPORT_FOUND;
+		fcport->flags |= FCF_FABRIC_DEVICE;
+
+		switch (fcport->disc_state) {
+		case DSC_DELETED:
+			ql_dbg(ql_dbg_disc, vha, 0x210d,
+			    "%s %d %8phC login\n", __func__, __LINE__,
+			    fcport->port_name);
+			qla24xx_fcport_handle_login(vha, fcport);
+			break;
+		case DSC_DELETE_PEND:
+			break;
+		default:
+			ql_dbg(ql_dbg_disc, vha, 0x2064,
+			    "%s %d %8phC post del sess\n",
+			    __func__, __LINE__, fcport->port_name);
+			qlt_schedule_sess_for_deletion_lock(fcport);
+			break;
+		}
+	} else {
+		/* create new fcport */
+		ql_dbg(ql_dbg_disc, vha, 0x2065,
+		    "%s %d %8phC post new sess\n",
+		    __func__, __LINE__, ea->port_name);
+
+		qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+	}
+}
+
+static void qla2x00_async_gpnid_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+	struct scsi_qla_host *vha = sp->vha;
+	struct ct_sns_req *ct_req =
+	    (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+	struct ct_sns_rsp *ct_rsp =
+	    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+	struct event_arg ea;
+	struct qla_work_evt *e;
+	unsigned long flags;
+
+	if (res)
+		ql_dbg(ql_dbg_disc, vha, 0x2066,
+		    "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
+		    sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+		    ct_rsp->rsp.gpn_id.port_name);
+	else
+		ql_dbg(ql_dbg_disc, vha, 0x2066,
+		    "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
+		    sp->name, sp->gen1, ct_req->req.port_id.port_id,
+		    ct_rsp->rsp.gpn_id.port_name);
+
+	memset(&ea, 0, sizeof(ea));
+	memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
+	ea.sp = sp;
+	ea.id.b.domain = ct_req->req.port_id.port_id[0];
+	ea.id.b.area = ct_req->req.port_id.port_id[1];
+	ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
+	ea.rc = res;
+	ea.event = FCME_GPNID_DONE;
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	list_del(&sp->elem);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	if (res) {
+		if (res == QLA_FUNCTION_TIMEOUT)
+			qla24xx_post_gpnid_work(sp->vha, &ea.id);
+		sp->free(sp);
+		return;
+	} else if (sp->gen1) {
+		/* There was another RSCN for this Nport ID. */
+		qla24xx_post_gpnid_work(sp->vha, &ea.id);
+		sp->free(sp);
+		return;
+	}
+
+	qla2x00_fcport_event_handler(vha, &ea);
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+	if (!e) {
+		/*
+		 * Work allocation failed; free the DMA buffers here so
+		 * they are not leaked.
+		 */
+		if (sp->u.iocb_cmd.u.ctarg.req) {
+			dma_free_coherent(&vha->hw->pdev->dev,
+				sizeof(struct ct_sns_pkt),
+				sp->u.iocb_cmd.u.ctarg.req,
+				sp->u.iocb_cmd.u.ctarg.req_dma);
+			sp->u.iocb_cmd.u.ctarg.req = NULL;
+		}
+		if (sp->u.iocb_cmd.u.ctarg.rsp) {
+			dma_free_coherent(&vha->hw->pdev->dev,
+				sizeof(struct ct_sns_pkt),
+				sp->u.iocb_cmd.u.ctarg.rsp,
+				sp->u.iocb_cmd.u.ctarg.rsp_dma);
+			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+		}
+
+		sp->free(sp);
+		return;
+	}
+
+	e->u.iosb.sp = sp;
+	qla2x00_post_work(vha, e);
+}
+
+/* Get WWPN with Nport ID. */
+int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct ct_sns_req       *ct_req;
+	srb_t *sp, *tsp;
+	struct ct_sns_pkt *ct_sns;
+	unsigned long flags;
+
+	if (!vha->flags.online)
+		goto done;
+
+	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_CT_PTHRU_CMD;
+	sp->name = "gpnid";
+	sp->u.iocb_cmd.u.ctarg.id = *id;
+	sp->gen1 = 0;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
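+	/*
+	 * Coalesce duplicate queries: if a GPN_ID for this N_Port ID is
+	 * already on gpnid_list, bump its generation so its completion
+	 * re-issues the query, and drop this sp.
+	 */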
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	list_for_each_entry(tsp, &vha->gpnid_list, elem) {
+		if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
+			tsp->gen1++;
+			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+			sp->free(sp);
+			goto done;
+		}
+	}
+	list_add_tail(&sp->elem, &vha->gpnid_list);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+		sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+		GFP_KERNEL);
+	if (!sp->u.iocb_cmd.u.ctarg.req) {
+		ql_log(ql_log_warn, vha, 0xd041,
+		    "Failed to allocate ct_sns request.\n");
+		goto done_free_sp;
+	}
+
+	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+		sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+		GFP_KERNEL);
+	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+		ql_log(ql_log_warn, vha, 0xd042,
+		    "Failed to allocate ct_sns request.\n");
+		goto done_free_sp;
+	}
+
+	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+	memset(ct_sns, 0, sizeof(*ct_sns));
+
+	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+	/* CT_IU preamble  */
+	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
+
+	/* GPN_ID req */
+	ct_req->req.port_id.port_id[0] = id->b.domain;
+	ct_req->req.port_id.port_id[1] = id->b.area;
+	ct_req->req.port_id.port_id[2] = id->b.al_pa;
+
+	sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
+	sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
+	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_gpnid_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2067,
+	    "Async-%s hdl=%x ID %3phC.\n", sp->name,
+	    sp->handle, ct_req->req.port_id.port_id);
+	return rval;
+
+done_free_sp:
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	list_del(&sp->elem);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	if (sp->u.iocb_cmd.u.ctarg.req) {
+		dma_free_coherent(&vha->hw->pdev->dev,
+			sizeof(struct ct_sns_pkt),
+			sp->u.iocb_cmd.u.ctarg.req,
+			sp->u.iocb_cmd.u.ctarg.req_dma);
+		sp->u.iocb_cmd.u.ctarg.req = NULL;
+	}
+	if (sp->u.iocb_cmd.u.ctarg.rsp) {
+		dma_free_coherent(&vha->hw->pdev->dev,
+			sizeof(struct ct_sns_pkt),
+			sp->u.iocb_cmd.u.ctarg.rsp,
+			sp->u.iocb_cmd.u.ctarg.rsp_dma);
+		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+	}
+
+	sp->free(sp);
+done:
+	return rval;
+}
+
+void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	fc_port_t *fcport = ea->fcport;
+
+	qla24xx_post_gnl_work(vha, fcport);
+}
+
+void qla24xx_async_gffid_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+	struct scsi_qla_host *vha = sp->vha;
+	fc_port_t *fcport = sp->fcport;
+	struct ct_sns_rsp *ct_rsp;
+	struct event_arg ea;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2133,
+	    "Async done-%s res %x ID %x. %8phC\n",
+	    sp->name, res, fcport->d_id.b24, fcport->port_name);
+
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+	/*
+	 * FC-GS-7, 5.2.3.12 FC-4 Features - format.
+	 * The format of the FC-4 Features object, as defined by the FC-4,
+	 * shall be an array of 4-bit values, one for each type code value.
+	 */
+	if (!res) {
+		if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+			/* w1 b00:03 */
+			fcport->fc4_type =
+			    ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+			fcport->fc4_type &= 0xf;
+		}
+
+		if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+			/* w5 [00:03]/28h */
+			fcport->fc4f_nvme =
+			    ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+			fcport->fc4f_nvme &= 0xf;
+		}
+	}
+
+	memset(&ea, 0, sizeof(ea));
+	ea.sp = sp;
+	ea.fcport = sp->fcport;
+	ea.rc = res;
+	ea.event = FCME_GFFID_DONE;
+
+	qla2x00_fcport_event_handler(vha, &ea);
+	sp->free(sp);
+}
+
+/* Get FC4 Feature with Nport ID. */
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct ct_sns_req       *ct_req;
+	srb_t *sp;
+
+	if (!vha->flags.online)
+		return rval;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		return rval;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	sp->type = SRB_CT_PTHRU_CMD;
+	sp->name = "gffid";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	/* CT_IU preamble  */
+	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
+	    GFF_ID_RSP_SIZE);
+
+	ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
+	ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
+	ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
+
+	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+	sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
+	sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
+	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla24xx_async_gffid_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2132,
+	    "Async-%s hdl=%x  %8phC.\n", sp->name,
+	    sp->handle, fcport->port_name);
+
+	return rval;
+done_free_sp:
+	sp->free(sp);
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_init.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_init.c
new file mode 100644
index 0000000..a66f7ce
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_init.c
@@ -0,0 +1,8121 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "qla_devtbl.h"
+
+#ifdef CONFIG_SPARC
+#include <asm/prom.h>
+#endif
+
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
+/*
+ *  QLogic ISP2x00 Hardware Support Function Prototypes.
+ */
+static int qla2x00_isp_firmware(scsi_qla_host_t *);
+static int qla2x00_setup_chip(scsi_qla_host_t *);
+static int qla2x00_fw_ready(scsi_qla_host_t *);
+static int qla2x00_configure_hba(scsi_qla_host_t *);
+static int qla2x00_configure_loop(scsi_qla_host_t *);
+static int qla2x00_configure_local_loop(scsi_qla_host_t *);
+static int qla2x00_configure_fabric(scsi_qla_host_t *);
+static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
+static int qla2x00_restart_isp(scsi_qla_host_t *);
+
+static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
+static int qla84xx_init_chip(scsi_qla_host_t *);
+static int qla25xx_init_queues(struct qla_hw_data *);
+static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
+static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
+    struct event_arg *);
+static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
+    struct event_arg *);
+
+/* SRB Extensions ---------------------------------------------------------- */
+
+void
+qla2x00_sp_timeout(unsigned long __data)
+{
+	srb_t *sp = (srb_t *)__data;
+	struct srb_iocb *iocb;
+	scsi_qla_host_t *vha = sp->vha;
+	struct req_que *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	req = vha->hw->req_q_map[0];
+	req->outstanding_cmds[sp->handle] = NULL;
+	iocb = &sp->u.iocb_cmd;
+	iocb->timeout(sp);
+	sp->free(sp);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+void
+qla2x00_sp_free(void *ptr)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+
+	del_timer(&iocb->timer);
+	qla2x00_rel_sp(sp);
+}
+
+/* Asynchronous Login/Logout Routines -------------------------------------- */
+
+unsigned long
+qla2x00_get_async_timeout(struct scsi_qla_host *vha)
+{
+	unsigned long tmo;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Firmware should use switch negotiated r_a_tov for timeout. */
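+	/* r_a_tov is in 100 ms units; the timeout is 2 * R_A_TOV seconds. */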
+	tmo = ha->r_a_tov / 10 * 2;
+	if (IS_QLAFX00(ha)) {
+		tmo = FX00_DEF_RATOV * 2;
+	} else if (!IS_FWI2_CAPABLE(ha)) {
+		/*
+		 * Except for earlier ISPs where the timeout is seeded from the
+		 * initialization control block.
+		 */
+		tmo = ha->login_timeout;
+	}
+	return tmo;
+}
+
+void
+qla2x00_async_iocb_timeout(void *data)
+{
+	srb_t *sp = data;
+	fc_port_t *fcport = sp->fcport;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct event_arg ea;
+
+	if (fcport) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+		    "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+		    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+
+		fcport->flags &= ~FCF_ASYNC_SENT;
+	} else {
+		pr_info("Async-%s timeout - hdl=%x.\n",
+		    sp->name, sp->handle);
+	}
+
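+	/*
+	 * Complete the timed-out command per type: logins synthesize a
+	 * PLOGI-done event so retry logic runs, logouts notify the target
+	 * code, and the rest are completed with a timeout status.
+	 */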
+	switch (sp->type) {
+	case SRB_LOGIN_CMD:
+		if (!fcport)
+			break;
+		/* Retry as needed. */
+		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
+		lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+			QLA_LOGIO_LOGIN_RETRIED : 0;
+		memset(&ea, 0, sizeof(ea));
+		ea.event = FCME_PLOGI_DONE;
+		ea.fcport = sp->fcport;
+		ea.data[0] = lio->u.logio.data[0];
+		ea.data[1] = lio->u.logio.data[1];
+		ea.sp = sp;
+		qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+		break;
+	case SRB_LOGOUT_CMD:
+		if (!fcport)
+			break;
+		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
+		break;
+	case SRB_CT_PTHRU_CMD:
+	case SRB_MB_IOCB:
+	case SRB_NACK_PLOGI:
+	case SRB_NACK_PRLI:
+	case SRB_NACK_LOGO:
+		sp->done(sp, QLA_FUNCTION_TIMEOUT);
+		break;
+	}
+}
+
+static void
+qla2x00_async_login_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct scsi_qla_host *vha = sp->vha;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct event_arg ea;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20dd,
+	    "%s %8phC res %d\n", __func__, sp->fcport->port_name, res);
+
+	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+		memset(&ea, 0, sizeof(ea));
+		ea.event = FCME_PLOGI_DONE;
+		ea.fcport = sp->fcport;
+		ea.data[0] = lio->u.logio.data[0];
+		ea.data[1] = lio->u.logio.data[1];
+		ea.iop[0] = lio->u.logio.iop[0];
+		ea.iop[1] = lio->u.logio.iop[1];
+		ea.sp = sp;
+		qla2x00_fcport_event_handler(vha, &ea);
+	}
+
+	sp->free(sp);
+}
+
+int
+qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	srb_t *sp;
+	struct srb_iocb *lio;
+	int rval = QLA_FUNCTION_FAILED;
+
+	if (!vha->flags.online)
+		goto done;
+
+	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+	    (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+		goto done;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	fcport->logout_completed = 0;
+
+	sp->type = SRB_LOGIN_CMD;
+	sp->name = "login";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_login_sp_done;
+	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+
+	if (fcport->fc4f_nvme)
+		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+
+	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		fcport->flags &= ~FCF_ASYNC_SENT;
+		fcport->flags |= FCF_LOGIN_NEEDED;
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		goto done_free_sp;
+	}
+
+	ql_dbg(ql_dbg_disc, vha, 0x2072,
+	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    fcport->login_retry);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+static void
+qla2x00_async_logout_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+	if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
+		qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp);
+}
+
+int
+qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	srb_t *sp;
+	struct srb_iocb *lio;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	fcport->flags |= FCF_ASYNC_SENT;
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_LOGOUT_CMD;
+	sp->name = "logout";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_logout_sp_done;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2070,
+	    "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa, fcport->port_name);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+static void
+qla2x00_async_adisc_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct scsi_qla_host *vha = sp->vha;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp);
+}
+
+int
+qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	srb_t *sp;
+	struct srb_iocb *lio;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	fcport->flags |= FCF_ASYNC_SENT;
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_ADISC_CMD;
+	sp->name = "adisc";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_adisc_sp_done;
+	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x206f,
+	    "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
+	struct event_arg *ea)
+{
+	fc_port_t *fcport, *conflict_fcport;
+	struct get_name_list_extended *e;
+	u16 i, n, found = 0, loop_id;
+	port_id_t id;
+	u64 wwn;
+	u8 opt = 0, current_login_state;
+
+	fcport = ea->fcport;
+
+	if (ea->rc) { /* name list command failed */
+		if (fcport->login_retry == 0) {
+			fcport->login_retry = vha->hw->login_retry_count;
+			ql_dbg(ql_dbg_disc, vha, 0x20de,
+			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
+			    fcport->port_name, fcport->login_retry);
+		}
+		return;
+	}
+
+	if (fcport->last_rscn_gen != fcport->rscn_gen) {
+		ql_dbg(ql_dbg_disc, vha, 0x20df,
+		    "%s %8phC rscn gen changed rscn %d|%d\n",
+		    __func__, fcport->port_name,
+		    fcport->last_rscn_gen, fcport->rscn_gen);
+		qla24xx_post_gidpn_work(vha, fcport);
+		return;
+	} else if (fcport->last_login_gen != fcport->login_gen) {
+		ql_dbg(ql_dbg_disc, vha, 0x20e0,
+		    "%s %8phC login gen changed login %d|%d\n",
+		    __func__, fcport->port_name,
+		    fcport->last_login_gen, fcport->login_gen);
+		return;
+	}
+
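+	/* ea->data[0] carries the byte count the firmware transferred. */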
+	n = ea->data[0] / sizeof(struct get_name_list_extended);
+
+	ql_dbg(ql_dbg_disc, vha, 0x20e1,
+	    "%s %d %8phC n %d %02x%02x%02x lid %d\n",
+	    __func__, __LINE__, fcport->port_name, n,
+	    fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa, fcport->loop_id);
+
+	for (i = 0; i < n; i++) {
+		e = &vha->gnl.l[i];
+		wwn = wwn_to_u64(e->port_name);
+
+		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
+			continue;
+
+		found = 1;
+		id.b.domain = e->port_id[2];
+		id.b.area = e->port_id[1];
+		id.b.al_pa = e->port_id[0];
+		id.b.rsvd_1 = 0;
+
+		loop_id = le16_to_cpu(e->nport_handle);
+		loop_id = (loop_id & 0x7fff);
+
+		ql_dbg(ql_dbg_disc, vha, 0x20e2,
+		    "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+		    __func__, fcport->port_name,
+		    e->current_login_state, fcport->fw_login_state,
+		    id.b.domain, id.b.area, id.b.al_pa,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+
+		if ((id.b24 != fcport->d_id.b24) ||
+		    ((fcport->loop_id != FC_NO_LOOP_ID) &&
+			(fcport->loop_id != loop_id))) {
+			ql_dbg(ql_dbg_disc, vha, 0x20e3,
+			    "%s %d %8phC post del sess\n",
+			    __func__, __LINE__, fcport->port_name);
+			qlt_schedule_sess_for_deletion(fcport, 1);
+			return;
+		}
+
+		fcport->loop_id = loop_id;
+
+		wwn = wwn_to_u64(fcport->port_name);
+		qlt_find_sess_invalidate_other(vha, wwn,
+			id, loop_id, &conflict_fcport);
+
+		if (conflict_fcport) {
+			/*
+			 * Another fcport shares the same loop_id and nport
+			 * id. The conflicting fcport needs to finish its
+			 * cleanup before this fcport can proceed with login.
+			 */
+			conflict_fcport->conflict = fcport;
+			fcport->login_pause = 1;
+		}
+
+		if (fcport->fc4f_nvme)
+			current_login_state = e->current_login_state >> 4;
+		else
+			current_login_state = e->current_login_state & 0xf;
+
+		switch (current_login_state) {
+		case DSC_LS_PRLI_COMP:
+			ql_dbg(ql_dbg_disc, vha, 0x20e4,
+			    "%s %d %8phC post gpdb\n",
+			    __func__, __LINE__, fcport->port_name);
+			opt = PDO_FORCE_ADISC;
+			qla24xx_post_gpdb_work(vha, fcport, opt);
+			break;
+		case DSC_LS_PORT_UNAVAIL:
+		default:
+			if (fcport->loop_id == FC_NO_LOOP_ID) {
+				qla2x00_find_new_loop_id(vha, fcport);
+				fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+			}
+			ql_dbg(ql_dbg_disc, vha, 0x20e5,
+			    "%s %d %8phC\n",
+			    __func__, __LINE__, fcport->port_name);
+			qla24xx_fcport_handle_login(vha, fcport);
+			break;
+		}
+	}
+
+	if (!found) {
+		/* fw has no record of this port */
+		if (fcport->loop_id == FC_NO_LOOP_ID) {
+			qla2x00_find_new_loop_id(vha, fcport);
+			fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+		} else {
+			for (i = 0; i < n; i++) {
+				e = &vha->gnl.l[i];
+				id.b.domain = e->port_id[0];
+				id.b.area = e->port_id[1];
+				id.b.al_pa = e->port_id[2];
+				id.b.rsvd_1 = 0;
+				loop_id = le16_to_cpu(e->nport_handle);
+
+				if (fcport->d_id.b24 == id.b24) {
+					conflict_fcport =
+					    qla2x00_find_fcport_by_wwpn(vha,
+						e->port_name, 0);
+
+					ql_dbg(ql_dbg_disc, vha, 0x20e6,
+					    "%s %d %8phC post del sess\n",
+					    __func__, __LINE__,
+					    conflict_fcport->port_name);
+					qlt_schedule_sess_for_deletion
+						(conflict_fcport, 1);
+				}
+
+				if (fcport->loop_id == loop_id) {
+					/* FW already picked this loop id for another fcport */
+					qla2x00_find_new_loop_id(vha, fcport);
+				}
+			}
+		}
+		qla24xx_fcport_handle_login(vha, fcport);
+	}
+} /* gnl_event */
+
+static void
+qla24xx_async_gnl_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+	struct scsi_qla_host *vha = sp->vha;
+	unsigned long flags;
+	struct fc_port *fcport = NULL, *tf;
+	u16 i, n = 0, loop_id;
+	struct event_arg ea;
+	struct get_name_list_extended *e;
+	u64 wwn;
+	struct list_head h;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20e7,
+	    "Async done-%s res %x mb[1]=%x mb[2]=%x\n",
+	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
+	    sp->u.iocb_cmd.u.mbx.in_mb[2]);
+
+	memset(&ea, 0, sizeof(ea));
+	ea.sp = sp;
+	ea.rc = res;
+	ea.event = FCME_GNL_DONE;
+
+	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
+	    sizeof(struct get_name_list_extended)) {
+		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
+		    sizeof(struct get_name_list_extended);
+		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
+	}
+
+	for (i = 0; i < n; i++) {
+		e = &vha->gnl.l[i];
+		loop_id = le16_to_cpu(e->nport_handle);
+		/* mask out reserve bit */
+		loop_id = (loop_id & 0x7fff);
+		set_bit(loop_id, vha->hw->loop_id_map);
+		wwn = wwn_to_u64(e->port_name);
+
+		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
+		    "%s %8phC %02x:%02x:%02x state %d/%d lid %x\n",
+		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+		    e->port_id[0], e->current_login_state, e->last_login_state,
+		    (loop_id & 0x7fff));
+	}
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	vha->gnl.sent = 0;
+
+	INIT_LIST_HEAD(&h);
+	fcport = tf = NULL;
+	if (!list_empty(&vha->gnl.fcports))
+		list_splice_init(&vha->gnl.fcports, &h);
+
+	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
+		list_del_init(&fcport->gnl_entry);
+		fcport->flags &= ~FCF_ASYNC_SENT;
+		ea.fcport = fcport;
+
+		qla2x00_fcport_event_handler(vha, &ea);
+	}
+
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	sp->free(sp);
+}
+
+int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	srb_t *sp;
+	struct srb_iocb *mbx;
+	int rval = QLA_FUNCTION_FAILED;
+	unsigned long flags;
+	u16 *mb;
+
+	if (!vha->flags.online)
+		goto done;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20d9,
+	    "Async-gnlist WWPN %8phC\n", fcport->port_name);
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	fcport->flags |= FCF_ASYNC_SENT;
+	fcport->disc_state = DSC_GNL;
+	fcport->last_rscn_gen = fcport->rscn_gen;
+	fcport->last_login_gen = fcport->login_gen;
+
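+	/*
+	 * Queue this fcport on the shared get-name-list; if a list command
+	 * is already outstanding, its completion services every queued
+	 * entry, so do not issue a second one.
+	 */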
+	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
+	if (vha->gnl.sent) {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		rval = QLA_SUCCESS;
+		goto done;
+	}
+	vha->gnl.sent = 1;
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+	sp->type = SRB_MB_IOCB;
+	sp->name = "gnlist";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
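+	/*
+	 * MBC_PORT_NODE_NAME_LIST: mb[2-3]/mb[6-7] carry the 64-bit DMA
+	 * address of the gnl buffer, mb[8] its size and mb[9] the vport
+	 * index; the mb[1] option bits request the extended list format.
+	 */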
+	mb = sp->u.iocb_cmd.u.mbx.out_mb;
+	mb[0] = MBC_PORT_NODE_NAME_LIST;
+	mb[1] = BIT_2 | BIT_3;
+	mb[2] = MSW(vha->gnl.ldma);
+	mb[3] = LSW(vha->gnl.ldma);
+	mb[6] = MSW(MSD(vha->gnl.ldma));
+	mb[7] = LSW(MSD(vha->gnl.ldma));
+	mb[8] = vha->gnl.size;
+	mb[9] = vha->vp_idx;
+
+	mbx = &sp->u.iocb_cmd;
+	mbx->timeout = qla2x00_async_iocb_timeout;
+
+	sp->done = qla24xx_async_gnl_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20da,
+	    "Async-%s - OUT WWPN %8phC hndl %x\n",
+	    sp->name, fcport->port_name, sp->handle);
+
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+	return qla2x00_post_work(vha, e);
+}
+
+static
+void qla24xx_async_gpdb_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct port_database_24xx *pd;
+	fc_port_t *fcport = sp->fcport;
+	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
+	int rval = QLA_SUCCESS;
+	struct event_arg ea;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20db,
+	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x\n",
+	    sp->name, res, fcport->port_name, mb[1], mb[2]);
+
+	fcport->flags &= ~FCF_ASYNC_SENT;
+
+	if (res) {
+		rval = res;
+		goto gpd_error_out;
+	}
+
+	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+gpd_error_out:
+	memset(&ea, 0, sizeof(ea));
+	ea.event = FCME_GPDB_DONE;
+	ea.rc = rval;
+	ea.fcport = fcport;
+	ea.sp = sp;
+
+	qla2x00_fcport_event_handler(vha, &ea);
+
+	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+		sp->u.iocb_cmd.u.mbx.in_dma);
+
+	sp->free(sp);
+}
+
+static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+
+	return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_async_prli_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct scsi_qla_host *vha = sp->vha;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct event_arg ea;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2129,
+	    "%s %8phC res %d\n", __func__,
+	    sp->fcport->port_name, res);
+
+	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+		memset(&ea, 0, sizeof(ea));
+		ea.event = FCME_PRLI_DONE;
+		ea.fcport = sp->fcport;
+		ea.data[0] = lio->u.logio.data[0];
+		ea.data[1] = lio->u.logio.data[1];
+		ea.iop[0] = lio->u.logio.iop[0];
+		ea.iop[1] = lio->u.logio.iop[1];
+		ea.sp = sp;
+
+		qla2x00_fcport_event_handler(vha, &ea);
+	}
+
+	sp->free(sp);
+}
+
+int
+qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	srb_t *sp;
+	struct srb_iocb *lio;
+	int rval = QLA_FUNCTION_FAILED;
+
+	if (!vha->flags.online)
+		return rval;
+
+	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+	    fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
+	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
+		return rval;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		return rval;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	fcport->logout_completed = 0;
+
+	sp->type = SRB_PRLI_CMD;
+	sp->name = "prli";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
+	lio->timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_prli_sp_done;
+	lio->u.logio.flags = 0;
+
+	if (fcport->fc4f_nvme)
+		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		fcport->flags &= ~FCF_ASYNC_SENT;
+		fcport->flags |= FCF_LOGIN_NEEDED;
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		goto done_free_sp;
+	}
+
+	ql_dbg(ql_dbg_disc, vha, 0x211b,
+	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
+	    fcport->port_name, sp->handle, fcport->loop_id,
+	    fcport->d_id.b24, fcport->login_retry);
+
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+	e->u.fcport.opt = opt;
+	return qla2x00_post_work(vha, e);
+}
+
+int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+	srb_t *sp;
+	struct srb_iocb *mbx;
+	int rval = QLA_FUNCTION_FAILED;
+	u16 *mb;
+	dma_addr_t pd_dma;
+	struct port_database_24xx *pd;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!vha->flags.online)
+		goto done;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	fcport->disc_state = DSC_GPDB;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_MB_IOCB;
+	sp->name = "gpdb";
+	sp->gen1 = fcport->rscn_gen;
+	sp->gen2 = fcport->login_gen;
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+	if (pd == NULL) {
+		ql_log(ql_log_warn, vha, 0xd043,
+		    "Failed to allocate port database structure.\n");
+		goto done_free_sp;
+	}
+	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+	mb = sp->u.iocb_cmd.u.mbx.out_mb;
+	mb[0] = MBC_GET_PORT_DATABASE;
+	mb[1] = fcport->loop_id;
+	mb[2] = MSW(pd_dma);
+	mb[3] = LSW(pd_dma);
+	mb[6] = MSW(MSD(pd_dma));
+	mb[7] = LSW(MSD(pd_dma));
+	mb[9] = vha->vp_idx;
+	mb[10] = opt;
+
+	mbx = &sp->u.iocb_cmd;
+	mbx->timeout = qla2x00_async_iocb_timeout;
+	mbx->u.mbx.in = (void *)pd;
+	mbx->u.mbx.in_dma = pd_dma;
+
+	sp->done = qla24xx_async_gpdb_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20dc,
+	    "Async-%s %8phC hndl %x opt %x\n",
+	    sp->name, fcport->port_name, sp->handle, opt);
+
+	return rval;
+
+done_free_sp:
+	if (pd)
+		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	qla24xx_post_gpdb_work(vha, fcport, opt);
+	return rval;
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	int rval = ea->rc;
+	fc_port_t *fcport = ea->fcport;
+	unsigned long flags;
+	u16 opt = ea->sp->u.iocb_cmd.u.mbx.out_mb[10];
+
+	fcport->flags &= ~FCF_ASYNC_SENT;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20d2,
+	    "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
+	    fcport->disc_state, fcport->fw_login_state, rval);
+
+	if (ea->sp->gen2 != fcport->login_gen) {
+		/* target side must have changed it. */
+		ql_dbg(ql_dbg_disc, vha, 0x20d3,
+		    "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+		    __func__, fcport->port_name, fcport->last_rscn_gen,
+		    fcport->rscn_gen, fcport->last_login_gen,
+		    fcport->login_gen);
+		return;
+	} else if (ea->sp->gen1 != fcport->rscn_gen) {
+		ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+		    __func__, __LINE__, fcport->port_name);
+		qla24xx_post_gidpn_work(vha, fcport);
+		return;
+	}
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+		    __func__, __LINE__, fcport->port_name);
+		qlt_schedule_sess_for_deletion_lock(fcport);
+		return;
+	}
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	if (opt != PDO_FORCE_ADISC)
+		ea->fcport->login_gen++;
+	ea->fcport->deleted = 0;
+	ea->fcport->logout_on_delete = 1;
+
+	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
+		vha->fcport_count++;
+		ea->fcport->login_succ = 1;
+
+		if (!IS_IIDMA_CAPABLE(vha->hw) ||
+		    !vha->hw->flags.gpsc_supported) {
+			ql_dbg(ql_dbg_disc, vha, 0x20d6,
+			    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+			    __func__, __LINE__, fcport->port_name,
+			    vha->fcport_count);
+
+			qla24xx_post_upd_fcport_work(vha, fcport);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20d7,
+			    "%s %d %8phC post gpsc fcp_cnt %d\n",
+			    __func__, __LINE__, fcport->port_name,
+			    vha->fcport_count);
+
+			qla24xx_post_gpsc_work(vha, fcport);
+		}
+	} else if (ea->fcport->login_succ) {
+		/*
+		 * We have an existing session. A late RSCN delivery
+		 * must have triggered the session to be re-validated.
+		 * The session is still valid.
+		 */
+		fcport->disc_state = DSC_LOGIN_COMPLETE;
+	}
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+} /* gpdb event */
+
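+/*
+ * qla24xx_fcport_handle_login() - drive the next step of the login state
+ * machine for @fcport based on its disc_state:
+ *
+ *   DSC_DELETED        -> GNL when no loop id is assigned, else async PLOGI
+ *   DSC_GNL            -> GPDB for FCP2 devices, else async PLOGI
+ *   DSC_LOGIN_FAILED   -> GIDPN to re-resolve the port id by port name
+ *   DSC_LOGIN_COMPLETE -> GPDB (PDO_FORCE_ADISC) to re-check the session
+ *
+ * No action is taken when retries are exhausted, the port was not found
+ * by the last scan, a firmware login is already pending, an async command
+ * is in flight, or the host runs in pure target mode.
+ */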
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	if (fcport->login_retry == 0)
+		return 0;
+
+	if (fcport->scan_state != QLA_FCPORT_FOUND)
+		return 0;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20d8,
+	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+	    __func__, fcport->port_name, fcport->disc_state,
+	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
+	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
+	    fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
+	    fcport->loop_id);
+
+	fcport->login_retry--;
+
+	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+		return 0;
+
+	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+			return 0;
+	}
+
+	/* Pure target mode: login will not be initiated. */
+	if (vha->host->active_mode == MODE_TARGET)
+		return 0;
+
+	if (fcport->flags & FCF_ASYNC_SENT) {
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		return 0;
+	}
+
+	switch (fcport->disc_state) {
+	case DSC_DELETED:
+		if (fcport->loop_id == FC_NO_LOOP_ID) {
+			ql_dbg(ql_dbg_disc, vha, 0x20bd,
+			    "%s %d %8phC post gnl\n",
+			    __func__, __LINE__, fcport->port_name);
+			qla24xx_post_gnl_work(vha, fcport);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20bf,
+			    "%s %d %8phC post login\n",
+			    __func__, __LINE__, fcport->port_name);
+			fcport->disc_state = DSC_LOGIN_PEND;
+			qla2x00_post_async_login_work(vha, fcport, NULL);
+		}
+		break;
+
+	case DSC_GNL:
+		if (fcport->login_pause) {
+			fcport->last_rscn_gen = fcport->rscn_gen;
+			fcport->last_login_gen = fcport->login_gen;
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+			break;
+		}
+
+		if (fcport->flags & FCF_FCP2_DEVICE) {
+			u8 opt = PDO_FORCE_ADISC;
+
+			ql_dbg(ql_dbg_disc, vha, 0x20c9,
+			    "%s %d %8phC post gpdb\n",
+			    __func__, __LINE__, fcport->port_name);
+
+			fcport->disc_state = DSC_GPDB;
+			qla24xx_post_gpdb_work(vha, fcport, opt);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20cf,
+			    "%s %d %8phC post login\n",
+			    __func__, __LINE__, fcport->port_name);
+			fcport->disc_state = DSC_LOGIN_PEND;
+			qla2x00_post_async_login_work(vha, fcport, NULL);
+		}
+
+		break;
+
+	case DSC_LOGIN_FAILED:
+		ql_dbg(ql_dbg_disc, vha, 0x20d0,
+		    "%s %d %8phC post gidpn\n",
+		    __func__, __LINE__, fcport->port_name);
+
+		qla24xx_post_gidpn_work(vha, fcport);
+		break;
+
+	case DSC_LOGIN_COMPLETE:
+		/* recheck login state */
+		ql_dbg(ql_dbg_disc, vha, 0x20d1,
+		    "%s %d %8phC post gpdb\n",
+		    __func__, __LINE__, fcport->port_name);
+
+		qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
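+/*
+ * RSCN handling: every received RSCN bumps fcport->rscn_gen. Async
+ * commands snapshot the generation (sp->gen1) when issued; a mismatch at
+ * completion time means the fabric changed underneath the command and the
+ * result must be re-qualified, typically via a fresh GIDPN query.
+ */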
+static
+void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
+{
+	fcport->rscn_gen++;
+
+	ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
+	    "%s %8phC DS %d LS %d\n",
+	    __func__, fcport->port_name, fcport->disc_state,
+	    fcport->fw_login_state);
+
+	if (fcport->flags & FCF_ASYNC_SENT)
+		return;
+
+	switch (fcport->disc_state) {
+	case DSC_DELETED:
+	case DSC_LOGIN_COMPLETE:
+		qla24xx_post_gidpn_work(fcport->vha, fcport);
+		break;
+
+	default:
+		break;
+	}
+}
+
+int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+	u8 *port_name, void *pla)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.new_sess.id = *id;
+	e->u.new_sess.pla = pla;
+	memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+
+	return qla2x00_post_work(vha, e);
+}
+
+static
+int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
+	struct event_arg *ea)
+{
+	fc_port_t *fcport = ea->fcport;
+
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return 0;
+
+	switch (vha->host->active_mode) {
+	case MODE_INITIATOR:
+	case MODE_DUAL:
+		if (fcport->scan_state == QLA_FCPORT_FOUND)
+			qla24xx_fcport_handle_login(vha, fcport);
+		break;
+
+	case MODE_TARGET:
+	default:
+		/* no-op */
+		break;
+	}
+
+	return 0;
+}
+
+static
+void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+	struct event_arg *ea)
+{
+	fc_port_t *fcport = ea->fcport;
+
+	if (fcport->scan_state != QLA_FCPORT_FOUND) {
+		fcport->login_retry++;
+		return;
+	}
+
+	ql_dbg(ql_dbg_disc, vha, 0x2102,
+	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
+	    __func__, fcport->port_name, fcport->disc_state,
+	    fcport->fw_login_state, fcport->login_pause,
+	    fcport->deleted, fcport->conflict,
+	    fcport->last_rscn_gen, fcport->rscn_gen,
+	    fcport->last_login_gen, fcport->login_gen,
+	    fcport->flags);
+
+	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+		return;
+
+	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+			return;
+	}
+
+	if (fcport->flags & FCF_ASYNC_SENT) {
+		fcport->login_retry++;
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		return;
+	}
+
+	if (fcport->disc_state == DSC_DELETE_PEND) {
+		fcport->login_retry++;
+		return;
+	}
+
+	if (fcport->last_rscn_gen != fcport->rscn_gen) {
+		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
+		    __func__, __LINE__, fcport->port_name);
+
+		qla24xx_post_gidpn_work(vha, fcport);
+		return;
+	}
+
+	qla24xx_fcport_handle_login(vha, fcport);
+}
+
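+/*
+ * qla2x00_fcport_event_handler() - central dispatcher for fcport
+ * discovery events (FCME_*). Relogin, RSCN and name-server completion
+ * events are dropped while a loop resync is pending or active, since the
+ * resync rebuilds the fcport list anyway; all other events are routed to
+ * their per-event handlers below.
+ */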
+void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+	fc_port_t *fcport, *f, *tf;
+	uint32_t id = 0, mask, rid;
+	int rc;
+
+	switch (ea->event) {
+	case FCME_RELOGIN:
+	case FCME_RSCN:
+	case FCME_GIDPN_DONE:
+	case FCME_GPSC_DONE:
+	case FCME_GPNID_DONE:
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+		    test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+			return;
+		break;
+	default:
+		break;
+	}
+
+	switch (ea->event) {
+	case FCME_RELOGIN:
+		if (test_bit(UNLOADING, &vha->dpc_flags))
+			return;
+
+		qla24xx_handle_relogin_event(vha, ea);
+		break;
+	case FCME_RSCN:
+		if (test_bit(UNLOADING, &vha->dpc_flags))
+			return;
+		switch (ea->id.b.rsvd_1) {
+		case RSCN_PORT_ADDR:
+			fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+			if (!fcport) {
+				/* cable moved */
+				rc = qla24xx_post_gpnid_work(vha, &ea->id);
+				if (rc) {
+					ql_log(ql_log_warn, vha, 0xd044,
+					    "RSCN GPNID work failed %02x%02x%02x\n",
+					    ea->id.b.domain, ea->id.b.area,
+					    ea->id.b.al_pa);
+				}
+			} else {
+				ea->fcport = fcport;
+				qla24xx_handle_rscn_event(fcport, ea);
+			}
+			break;
+		case RSCN_AREA_ADDR:
+		case RSCN_DOM_ADDR:
+			if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
+				mask = 0xffff00;
+				ql_dbg(ql_dbg_async, vha, 0x5044,
+				    "RSCN: Area 0x%06x was affected\n",
+				    ea->id.b24);
+			} else {
+				mask = 0xff0000;
+				ql_dbg(ql_dbg_async, vha, 0x507a,
+				    "RSCN: Domain 0x%06x was affected\n",
+				    ea->id.b24);
+			}
+
+			rid = ea->id.b24 & mask;
+			list_for_each_entry_safe(f, tf, &vha->vp_fcports,
+			    list) {
+				id = f->d_id.b24 & mask;
+				if (rid == id) {
+					ea->fcport = f;
+					qla24xx_handle_rscn_event(f, ea);
+				}
+			}
+			break;
+		case RSCN_FAB_ADDR:
+		default:
+			ql_log(ql_log_warn, vha, 0xd045,
+			    "RSCN: Fabric was affected. Addr format %d\n",
+			    ea->id.b.rsvd_1);
+			qla2x00_mark_all_devices_lost(vha, 1);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		}
+		break;
+	case FCME_GIDPN_DONE:
+		qla24xx_handle_gidpn_event(vha, ea);
+		break;
+	case FCME_GNL_DONE:
+		qla24xx_handle_gnl_done_event(vha, ea);
+		break;
+	case FCME_GPSC_DONE:
+		qla24xx_post_upd_fcport_work(vha, ea->fcport);
+		break;
+	case FCME_PLOGI_DONE:	/* Initiator side sent LLIOCB */
+		qla24xx_handle_plogi_done_event(vha, ea);
+		break;
+	case FCME_PRLI_DONE:
+		qla24xx_handle_prli_done_event(vha, ea);
+		break;
+	case FCME_GPDB_DONE:
+		qla24xx_handle_gpdb_event(vha, ea);
+		break;
+	case FCME_GPNID_DONE:
+		qla24xx_handle_gpnid_event(vha, ea);
+		break;
+	case FCME_GFFID_DONE:
+		qla24xx_handle_gffid_event(vha, ea);
+		break;
+	case FCME_DELETE_DONE:
+		qla24xx_handle_delete_done_event(vha, ea);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+}
+
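+/*
+ * Task-management (TMF) support: qla2x00_async_tm_cmd() builds an
+ * SRB_TM_CMD srb and then waits synchronously on a completion. The
+ * timeout handler flags CS_TIMEOUT before completing, and the done
+ * handler simply signals the completion, so the waiter wakes up on
+ * either path.
+ */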
+static void
+qla2x00_tmf_iocb_timeout(void *data)
+{
+	srb_t *sp = data;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	tmf->u.tmf.comp_status = CS_TIMEOUT;
+	complete(&tmf->u.tmf.comp);
+}
+
+static void
+qla2x00_tmf_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	complete(&tmf->u.tmf.comp);
+}
+
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+	uint32_t tag)
+{
+	struct scsi_qla_host *vha = fcport->vha;
+	struct srb_iocb *tm_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	tm_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_TM_CMD;
+	sp->name = "tmf";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	tm_iocb->u.tmf.flags = flags;
+	tm_iocb->u.tmf.lun = lun;
+	tm_iocb->u.tmf.data = tag;
+	sp->done = qla2x00_tmf_sp_done;
+	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+	init_completion(&tm_iocb->u.tmf.comp);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_taskm, vha, 0x802f,
+	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	wait_for_completion(&tm_iocb->u.tmf.comp);
+
+	rval = tm_iocb->u.tmf.data;
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
+		    "TM IOCB failed (%x).\n", rval);
+	}
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+		flags = tm_iocb->u.tmf.flags;
+		lun = (uint16_t)tm_iocb->u.tmf.lun;
+
+		/* Issue Marker IOCB */
+		qla2x00_marker(vha, vha->hw->req_q_map[0],
+		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+	}
+
+done_free_sp:
+	sp->free(sp);
+done:
+	return rval;
+}
+
+static void
+qla24xx_abort_iocb_timeout(void *data)
+{
+	srb_t *sp = data;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	abt->u.abt.comp_status = CS_TIMEOUT;
+	complete(&abt->u.abt.comp);
+}
+
+static void
+qla24xx_abort_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	if (del_timer(&sp->u.iocb_cmd.timer))
+		complete(&abt->u.abt.comp);
+}
+
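+/*
+ * qla24xx_async_abort_cmd() - issue an SRB_ABT_CMD IOCB for the command
+ * identified by cmd_sp->handle and wait for it to finish. The done
+ * handler only signals the waiter if it wins the race against the timer
+ * (del_timer() returning true), so the timeout and completion paths
+ * cannot both fire.
+ */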
+int
+qla24xx_async_abort_cmd(srb_t *cmd_sp)
+{
+	scsi_qla_host_t *vha = cmd_sp->vha;
+	fc_port_t *fcport = cmd_sp->fcport;
+	struct srb_iocb *abt_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	abt_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_ABT_CMD;
+	sp->name = "abort";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+	sp->done = qla24xx_abort_sp_done;
+	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
+	init_completion(&abt_iocb->u.abt.comp);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_async, vha, 0x507c,
+	    "Abort command issued - hdl=%x, target_id=%x\n",
+	    cmd_sp->handle, fcport->tgt_id);
+
+	wait_for_completion(&abt_iocb->u.abt.comp);
+
+	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	return rval;
+}
+
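+/*
+ * qla24xx_async_abort_command() - map @sp back to its outstanding-command
+ * handle under hardware_lock, then abort it. FX00 DCMD requests are
+ * aborted through qlafx00_fx_disc() instead of a 24xx abort IOCB.
+ */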
+int
+qla24xx_async_abort_command(srb_t *sp)
+{
+	unsigned long   flags = 0;
+
+	uint32_t	handle;
+	fc_port_t	*fcport = sp->fcport;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = vha->req;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+		if (req->outstanding_cmds[handle] == sp)
+			break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (handle == req->num_outstanding_cmds) {
+		/* Command not found. */
+		return QLA_FUNCTION_FAILED;
+	}
+	if (sp->type == SRB_FXIOCB_DCMD)
+		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+		    FXDISC_ABORT_IOCTL);
+
+	return qla24xx_async_abort_cmd(sp);
+}
+
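+/*
+ * qla24xx_handle_prli_done_event() - the PRLI outcome arrives in
+ * ea->data[0] as an MBS_* mailbox status; on MBS_COMMAND_COMPLETE the
+ * port proceeds to GPDB to pull its final login state from firmware.
+ */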
+static void
+qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+{
+	switch (ea->data[0]) {
+	case MBS_COMMAND_COMPLETE:
+		ql_dbg(ql_dbg_disc, vha, 0x2118,
+		    "%s %d %8phC post gpdb\n",
+		    __func__, __LINE__, ea->fcport->port_name);
+
+		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+		ea->fcport->logout_on_delete = 1;
+		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+		break;
+	default:
+		ql_dbg(ql_dbg_disc, vha, 0x2119,
+		    "%s %d %8phC unhandle event of %x\n",
+		    __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
+		break;
+	}
+}
+
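+/*
+ * qla24xx_handle_plogi_done_event() - PLOGI completion. Besides plain
+ * success and error, two conflict statuses need untangling:
+ * MBS_LOOP_ID_USED (our nport handle is taken: release it and re-run
+ * GNL) and MBS_PORT_ID_USED (another fcport owns this port id: either
+ * pause this login behind the conflicting session's cleanup, or adopt
+ * the existing handle and schedule the stale session for deletion).
+ */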
+static void
+qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+{
+	port_id_t cid;	/* conflict Nport id */
+	u16 lid;
+	struct fc_port *conflict_fcport;
+
+	switch (ea->data[0]) {
+	case MBS_COMMAND_COMPLETE:
+		/*
+		 * Driver must validate login state - If PRLI not complete,
+		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
+		 * requests.
+		 */
+		if (ea->fcport->fc4f_nvme) {
+			ql_dbg(ql_dbg_disc, vha, 0x2117,
+				"%s %d %8phC post prli\n",
+				__func__, __LINE__, ea->fcport->port_name);
+			qla24xx_post_prli_work(vha, ea->fcport);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20ea,
+			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+			ea->fcport->loop_id = FC_NO_LOOP_ID;
+			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+			ea->fcport->logout_on_delete = 1;
+			ea->fcport->send_els_logo = 0;
+			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+		}
+		break;
+	case MBS_COMMAND_ERROR:
+		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
+		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
+
+		ea->fcport->flags &= ~FCF_ASYNC_SENT;
+		ea->fcport->disc_state = DSC_LOGIN_FAILED;
+		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		else
+			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
+		break;
+	case MBS_LOOP_ID_USED:
+		/* data[1] = IO PARAM 1 = nport ID  */
+		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
+		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
+		cid.b.al_pa  = ea->iop[1] & 0xff;
+		cid.b.rsvd_1 = 0;
+
+		ql_dbg(ql_dbg_disc, vha, 0x20ec,
+		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+		    __func__, __LINE__, ea->fcport->port_name,
+		    ea->fcport->loop_id, cid.b24);
+
+		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+		ea->fcport->loop_id = FC_NO_LOOP_ID;
+		qla24xx_post_gnl_work(vha, ea->fcport);
+		break;
+	case MBS_PORT_ID_USED:
+		lid = ea->iop[1] & 0xffff;
+		qlt_find_sess_invalidate_other(vha,
+		    wwn_to_u64(ea->fcport->port_name),
+		    ea->fcport->d_id, lid, &conflict_fcport);
+
+		if (conflict_fcport) {
+			/*
+			 * Another fcport shares the same loop_id/nport id.
+			 * Conflict fcport needs to finish cleanup before this
+			 * fcport can proceed to login.
+			 */
+			conflict_fcport->conflict = ea->fcport;
+			ea->fcport->login_pause = 1;
+
+			ql_dbg(ql_dbg_disc, vha, 0x20ed,
+			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    ea->fcport->d_id.b24, lid);
+			qla2x00_clear_loop_id(ea->fcport);
+			qla24xx_post_gidpn_work(vha, ea->fcport);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20ed,
+			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+			    __func__, __LINE__, ea->fcport->port_name,
+			    ea->fcport->d_id.b24, lid);
+
+			qla2x00_clear_loop_id(ea->fcport);
+			set_bit(lid, vha->hw->loop_id_map);
+			ea->fcport->loop_id = lid;
+			ea->fcport->keep_nport_handle = 0;
+			qlt_schedule_sess_for_deletion(ea->fcport, false);
+		}
+		break;
+	}
+	return;
+}
+
+void
+qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	qla2x00_mark_device_lost(vha, fcport, 1, 0);
+	qlt_logo_completion_handler(fcport, data[0]);
+	fcport->login_gen++;
+	return;
+}
+
+void
+qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	if (data[0] == MBS_COMMAND_COMPLETE) {
+		qla2x00_update_fcport(vha, fcport);
+
+		return;
+	}
+
+	/* Retry login. */
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+	else
+		qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+	return;
+}
+
+/****************************************************************************/
+/*                QLogic ISP2x00 Hardware Support Functions.                */
+/****************************************************************************/
+
+static int
+qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t idc_major_ver, idc_minor_ver;
+	uint16_t config[4];
+
+	qla83xx_idc_lock(vha, 0);
+
+	/*
+	 * SV: TODO: Assign initialization timeout from
+	 * flash-info / other param.
+	 */
+	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
+	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
+
+	/* Set our fcoe function presence */
+	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb077,
+		    "Error while setting DRV-Presence.\n");
+		rval = QLA_FUNCTION_FAILED;
+		goto exit;
+	}
+
+	/* Decide the reset ownership */
+	qla83xx_reset_ownership(vha);
+
+	/*
+	 * On first protocol driver load:
+	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
+	 * register.
+	 * Others: Check compatibility with current IDC Major version.
+	 */
+	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
+	if (ha->flags.nic_core_reset_owner) {
+		/* Set IDC Major version */
+		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
+		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
+
+		/* Clearing IDC-Lock-Recovery register */
+		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
+	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
+		/*
+		 * Clear further IDC participation if we are not compatible with
+		 * the current IDC Major Version.
+		 */
+		ql_log(ql_log_warn, vha, 0xb07d,
+		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
+		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
+		__qla83xx_clear_drv_presence(vha);
+		rval = QLA_FUNCTION_FAILED;
+		goto exit;
+	}
+	/* Each function sets its supported Minor version. */
+	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
+	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
+	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
+
+	if (ha->flags.nic_core_reset_owner) {
+		memset(config, 0, sizeof(config));
+		if (!qla81xx_get_port_config(vha, config))
+			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+			    QLA8XXX_DEV_READY);
+	}
+
+	rval = qla83xx_idc_state_handler(vha);
+
+exit:
+	qla83xx_idc_unlock(vha, 0);
+
+	return rval;
+}
+
+/*
+ * qla2x00_initialize_adapter
+ *      Initialize board.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qla2x00_initialize_adapter(scsi_qla_host_t *vha)
+{
+	int	rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
+	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
+	/* Clear adapter flags. */
+	vha->flags.online = 0;
+	ha->flags.chip_reset_done = 0;
+	vha->flags.reset_active = 0;
+	ha->flags.pci_channel_io_perm_failure = 0;
+	ha->flags.eeh_busy = 0;
+	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->device_flags = DFLG_NO_CABLE;
+	vha->dpc_flags = 0;
+	vha->flags.management_server_logged_in = 0;
+	vha->marker_needed = 0;
+	ha->isp_abort_cnt = 0;
+	ha->beacon_blink_led = 0;
+
+	set_bit(0, ha->req_qid_map);
+	set_bit(0, ha->rsp_qid_map);
+
+	ql_dbg(ql_dbg_init, vha, 0x0040,
+	    "Configuring PCI space...\n");
+	rval = ha->isp_ops->pci_config(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0044,
+		    "Unable to configure PCI space.\n");
+		return (rval);
+	}
+
+	ha->isp_ops->reset_chip(vha);
+
+	rval = qla2xxx_get_flash_info(vha);
+	if (rval) {
+		ql_log(ql_log_fatal, vha, 0x004f,
+		    "Unable to validate FLASH data.\n");
+		return rval;
+	}
+
+	if (IS_QLA8044(ha)) {
+		qla8044_read_reset_template(vha);
+
+		/*
+		 * NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
+		 * to NEED_RESET, but if NEED_RESET is set, drivers should
+		 * honor the reset.
+		 */
+		if (ql2xdontresethba == 1)
+			qla8044_set_idc_dontreset(vha);
+	}
+
+	ha->isp_ops->get_flash_version(vha, req->ring);
+	ql_dbg(ql_dbg_init, vha, 0x0061,
+	    "Configure NVRAM parameters...\n");
+
+	ha->isp_ops->nvram_config(vha);
+
+	if (ha->flags.disable_serdes) {
+		/* Mask HBA via NVRAM settings? */
+		ql_log(ql_log_info, vha, 0x0077,
+		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x0078,
+	    "Verifying loaded RISC code...\n");
+
+	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
+		rval = ha->isp_ops->chip_diag(vha);
+		if (rval)
+			return (rval);
+		rval = qla2x00_setup_chip(vha);
+		if (rval)
+			return (rval);
+	}
+
+	if (IS_QLA84XX(ha)) {
+		ha->cs84xx = qla84xx_get_chip(vha);
+		if (!ha->cs84xx) {
+			ql_log(ql_log_warn, vha, 0x00d0,
+			    "Unable to configure ISP84XX.\n");
+			return QLA_FUNCTION_FAILED;
+		}
+	}
+
+	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
+		rval = qla2x00_init_rings(vha);
+
+	ha->flags.chip_reset_done = 1;
+
+	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
+		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
+		rval = qla84xx_init_chip(vha);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x00d4,
+			    "Unable to initialize ISP84XX.\n");
+			qla84xx_put_chip(vha);
+		}
+	}
+
+	/* Load the NIC Core f/w if we are the first protocol driver. */
+	if (IS_QLA8031(ha)) {
+		rval = qla83xx_nic_core_fw_load(vha);
+		if (rval)
+			ql_log(ql_log_warn, vha, 0x0124,
+			    "Error in initializing NIC Core f/w.\n");
+	}
+
+	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
+		qla24xx_read_fcp_prio_cfg(vha);
+
+	if (IS_P3P_TYPE(ha))
+		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
+	else
+		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
+
+	return (rval);
+}
+
+/**
+ * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2100_pci_config(scsi_qla_host_t *vha)
+{
+	uint16_t w;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	pci_disable_rom(ha->pdev);
+
+	/* Get PCI bus information. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2300_pci_config(scsi_qla_host_t *vha)
+{
+	uint16_t	w;
+	unsigned long   flags = 0;
+	uint32_t	cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+
+	if (IS_QLA2322(ha) || IS_QLA6322(ha))
+		w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	/*
+	 * If this is a 2300 card and not 2312, reset the
+	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
+	 * the 2310 also reports itself as a 2300 so we need to get the
+	 * fb revision level -- a 6 indicates it really is a 2300 and
+	 * not a 2310.
+	 */
+	if (IS_QLA2300(ha)) {
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+
+		/* Pause RISC. */
+		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+		for (cnt = 0; cnt < 30000; cnt++) {
+			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+				break;
+
+			udelay(10);
+		}
+
+		/* Select FPM registers. */
+		WRT_REG_WORD(&reg->ctrl_status, 0x20);
+		RD_REG_WORD(&reg->ctrl_status);
+
+		/* Get the fb rev level */
+		ha->fb_rev = RD_FB_CMD_REG(ha, reg);
+
+		if (ha->fb_rev == FPM_2300)
+			pci_clear_mwi(ha->pdev);
+
+		/* Deselect FPM registers. */
+		WRT_REG_WORD(&reg->ctrl_status, 0x0);
+		RD_REG_WORD(&reg->ctrl_status);
+
+		/* Release RISC module. */
+		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+		for (cnt = 0; cnt < 30000; cnt++) {
+			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+				break;
+
+			udelay(10);
+		}
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
+
+	pci_disable_rom(ha->pdev);
+
+	/* Get PCI bus information. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla24xx_pci_config(scsi_qla_host_t *vha)
+{
+	uint16_t w;
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
+
+	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
+	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
+		pcix_set_mmrbc(ha->pdev, 2048);
+
+	/* PCIe -- adjust Maximum Read Request Size (4096). */
+	if (pci_is_pcie(ha->pdev))
+		pcie_set_readrq(ha->pdev, 4096);
+
+	pci_disable_rom(ha->pdev);
+
+	ha->chip_revision = ha->pdev->revision;
+
+	/* Get PCI bus information. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla25xx_pci_config(scsi_qla_host_t *vha)
+{
+	uint16_t w;
+	struct qla_hw_data *ha = vha->hw;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	/* PCIe -- adjust Maximum Read Request Size (4096). */
+	if (pci_is_pcie(ha->pdev))
+		pcie_set_readrq(ha->pdev, 4096);
+
+	pci_disable_rom(ha->pdev);
+
+	ha->chip_revision = ha->pdev->revision;
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qla2x00_isp_firmware() - Choose firmware image.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_isp_firmware(scsi_qla_host_t *vha)
+{
+	int  rval;
+	uint16_t loop_id, topo, sw_cap;
+	uint8_t domain, area, al_pa;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Assume loading risc code */
+	rval = QLA_FUNCTION_FAILED;
+
+	if (ha->flags.disable_risc_code_load) {
+		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
+
+		/* Verify checksum of loaded RISC code. */
+		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
+		if (rval == QLA_SUCCESS) {
+			/* And, verify we are not in ROM code. */
+			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
+			    &area, &domain, &topo, &sw_cap);
+		}
+	}
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x007a,
+		    "**** Load RISC code ****.\n");
+
+	return (rval);
+}
+
+/**
+ * qla2x00_reset_chip() - Reset ISP chip.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qla2x00_reset_chip(scsi_qla_host_t *vha)
+{
+	unsigned long   flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint32_t	cnt;
+	uint16_t	cmd;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return;
+
+	ha->isp_ops->disable_intrs(ha);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Turn off master enable */
+	cmd = 0;
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
+	cmd &= ~PCI_COMMAND_MASTER;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
+
+	if (!IS_QLA2100(ha)) {
+		/* Pause RISC. */
+		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
+			for (cnt = 0; cnt < 30000; cnt++) {
+				if ((RD_REG_WORD(&reg->hccr) &
+				    HCCR_RISC_PAUSE) != 0)
+					break;
+				udelay(100);
+			}
+		} else {
+			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
+			udelay(10);
+		}
+
+		/* Select FPM registers. */
+		WRT_REG_WORD(&reg->ctrl_status, 0x20);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+
+		/* FPM Soft Reset. */
+		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
+		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */
+
+		/* Toggle Fpm Reset. */
+		if (!IS_QLA2200(ha)) {
+			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
+			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+		}
+
+		/* Select frame buffer registers. */
+		WRT_REG_WORD(&reg->ctrl_status, 0x10);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+
+		/* Reset frame buffer FIFOs. */
+		if (IS_QLA2200(ha)) {
+			WRT_FB_CMD_REG(ha, reg, 0xa000);
+			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
+		} else {
+			WRT_FB_CMD_REG(ha, reg, 0x00fc);
+
+			/* Read back fb_cmd until zero or 3 seconds max */
+			for (cnt = 0; cnt < 3000; cnt++) {
+				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
+					break;
+				udelay(100);
+			}
+		}
+
+		/* Select RISC module registers. */
+		WRT_REG_WORD(&reg->ctrl_status, 0);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+
+		/* Reset RISC processor. */
+		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+
+		/* Release RISC processor. */
+		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+	}
+
+	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+
+	/* Reset ISP chip. */
+	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+
+	/* Wait for RISC to recover from reset. */
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+		/*
+		 * It is necessary to have a delay here since the card doesn't
+		 * respond to PCI reads during a reset. On some architectures
+		 * this will result in an MCA.
+		 */
+		udelay(20);
+		for (cnt = 30000; cnt; cnt--) {
+			if ((RD_REG_WORD(&reg->ctrl_status) &
+			    CSR_ISP_SOFT_RESET) == 0)
+				break;
+			udelay(100);
+		}
+	} else
+		udelay(10);
+
+	/* Reset RISC processor. */
+	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+
+	WRT_REG_WORD(&reg->semaphore, 0);
+
+	/* Release RISC processor. */
+	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+		for (cnt = 0; cnt < 30000; cnt++) {
+			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
+				break;
+
+			udelay(100);
+		}
+	} else
+		udelay(100);
+
+	/* Turn on master enable */
+	cmd |= PCI_COMMAND_MASTER;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
+
+	/* Disable RISC pause on FPM parity error. */
+	if (!IS_QLA2100(ha)) {
+		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
+	}
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/**
+ * qla81xx_reset_mpi() - Reset MPI firmware via the Write MPI Register MBC.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla81xx_reset_mpi(scsi_qla_host_t *vha)
+{
+	uint16_t mb[4] = {0x1010, 0, 1, 0};
+
+	if (!IS_QLA81XX(vha->hw))
+		return QLA_SUCCESS;
+
+	return qla81xx_write_mpi_register(vha, mb);
+}
+
+/**
+ * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static inline int
+qla24xx_reset_risc(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	uint32_t cnt;
+	uint16_t wd;
+	static int abts_cnt; /* ISP abort retry counts */
+	int rval = QLA_SUCCESS;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Reset RISC. */
+	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+	for (cnt = 0; cnt < 30000; cnt++) {
+		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+			break;
+
+		udelay(10);
+	}
+
+	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
+
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
+	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
+	    RD_REG_DWORD(&reg->hccr),
+	    RD_REG_DWORD(&reg->ctrl_status),
+	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+	udelay(100);
+
+	/* Wait for firmware to complete NVRAM accesses. */
+	RD_REG_WORD(&reg->mailbox0);
+	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		barrier();
+		if (cnt)
+			udelay(5);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (rval == QLA_SUCCESS)
+		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
+
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
+	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
+	    RD_REG_DWORD(&reg->hccr),
+	    RD_REG_DWORD(&reg->mailbox0));
+
+	/* Wait for soft-reset to complete. */
+	RD_REG_DWORD(&reg->ctrl_status);
+	for (cnt = 0; cnt < 60; cnt++) {
+		barrier();
+		if ((RD_REG_DWORD(&reg->ctrl_status) &
+		    CSRX_ISP_SOFT_RESET) == 0)
+			break;
+
+		udelay(5);
+	}
+	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
+
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
+	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
+	    RD_REG_DWORD(&reg->hccr),
+	    RD_REG_DWORD(&reg->ctrl_status));
+
+	/* If required, do an MPI FW reset now */
+	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
+		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
+			if (++abts_cnt < 5) {
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
+			} else {
+				/*
+				 * We exhausted the ISP abort retries. We have to
+				 * set the board offline.
+				 */
+				abts_cnt = 0;
+				vha->flags.online = 0;
+			}
+		}
+	}
+
+	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+	RD_REG_DWORD(&reg->hccr);
+
+	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+	RD_REG_DWORD(&reg->hccr);
+
+	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+	RD_REG_DWORD(&reg->hccr);
+
+	RD_REG_WORD(&reg->mailbox0);
+	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		barrier();
+		if (cnt)
+			udelay(5);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+	if (rval == QLA_SUCCESS)
+		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
+
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
+	    "Host Risc 0x%x, mailbox0 0x%x\n",
+	    RD_REG_DWORD(&reg->hccr),
+	    RD_REG_WORD(&reg->mailbox0));
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
+	    "Driver in %s mode\n",
+	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
+
+	if (IS_NOPOLLING_TYPE(ha))
+		ha->isp_ops->enable_intrs(ha);
+
+	return rval;
+}
+
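+/*
+ * ISP25xx RISC semaphore accessors: the semaphore register sits behind
+ * the indirect iobase window, so each access first programs iobase_addr
+ * with RISC_REGISTER_BASE_OFFSET and then reads or writes the window at
+ * RISC_REGISTER_WINDOW_OFFET.
+ */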
+static void
+qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
+{
+	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
+}
+
+static void
+qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
+{
+	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+}
+
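+/*
+ * qla25xx_manipulate_risc_semaphore() - grab the RISC semaphore before
+ * reset on the two affected subsystem IDs (0x0175/0x0240). The loop sets
+ * the semaphore and polls it back; a stuck FORCE bit is cleared and the
+ * acquire retried, and once TIMEOUT_TOTAL_ELAPSED is exceeded the
+ * semaphore is force-set as a last resort.
+ */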
+static void
+qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
+{
+	uint32_t wd32 = 0;
+	uint delta_msec = 100;
+	uint elapsed_msec = 0;
+	uint timeout_msec;
+	ulong n;
+
+	if (vha->hw->pdev->subsystem_device != 0x0175 &&
+	    vha->hw->pdev->subsystem_device != 0x0240)
+		return;
+
+	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+	udelay(100);
+
+attempt:
+	timeout_msec = TIMEOUT_SEMAPHORE;
+	n = timeout_msec / delta_msec;
+	while (n--) {
+		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
+		qla25xx_read_risc_sema_reg(vha, &wd32);
+		if (wd32 & RISC_SEMAPHORE)
+			break;
+		msleep(delta_msec);
+		elapsed_msec += delta_msec;
+		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+			goto force;
+	}
+
+	if (!(wd32 & RISC_SEMAPHORE))
+		goto force;
+
+	if (!(wd32 & RISC_SEMAPHORE_FORCE))
+		goto acquired;
+
+	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
+	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
+	n = timeout_msec / delta_msec;
+	while (n--) {
+		qla25xx_read_risc_sema_reg(vha, &wd32);
+		if (!(wd32 & RISC_SEMAPHORE_FORCE))
+			break;
+		msleep(delta_msec);
+		elapsed_msec += delta_msec;
+		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+			goto force;
+	}
+
+	if (wd32 & RISC_SEMAPHORE_FORCE)
+		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
+
+	goto attempt;
+
+force:
+	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
+
+acquired:
+	return;
+}
+
+/**
+ * qla24xx_reset_chip() - Reset ISP24xx chip.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qla24xx_reset_chip(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (pci_channel_offline(ha->pdev) &&
+	    ha->flags.pci_channel_io_perm_failure) {
+		return;
+	}
+
+	ha->isp_ops->disable_intrs(ha);
+
+	qla25xx_manipulate_risc_semaphore(vha);
+
+	/* Perform RISC reset. */
+	qla24xx_reset_risc(vha);
+}
+
+/**
+ * qla2x00_chip_diag() - Test chip for proper operation.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_chip_diag(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	unsigned long	flags = 0;
+	uint16_t	data;
+	uint32_t	cnt;
+	uint16_t	mb[5];
+	struct req_que *req = ha->req_q_map[0];
+
+	/* Assume a failed state */
+	rval = QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_init, vha, 0x007b,
+	    "Testing device at %lx.\n", (u_long)&reg->flash_address);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Reset ISP chip. */
+	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+
+	/*
+	 * We need to have a delay here since the card will not respond while
+	 * in reset causing an MCA on some architectures.
+	 */
+	udelay(20);
+	data = qla2x00_debounce_register(&reg->ctrl_status);
+	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
+		udelay(5);
+		data = RD_REG_WORD(&reg->ctrl_status);
+		barrier();
+	}
+
+	if (!cnt)
+		goto chip_diag_failed;
+
+	ql_dbg(ql_dbg_init, vha, 0x007c,
+	    "Reset register cleared by chip reset.\n");
+
+	/* Reset RISC processor. */
+	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+
+	/* Workaround for QLA2312 PCI parity error */
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
+		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
+			udelay(5);
+			data = RD_MAILBOX_REG(ha, reg, 0);
+			barrier();
+		}
+	} else
+		udelay(10);
+
+	if (!cnt)
+		goto chip_diag_failed;
+
+	/* Check product ID of chip */
+	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
+
+	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
+	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
+	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
+	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
+	    mb[3] != PROD_ID_3) {
+		ql_log(ql_log_warn, vha, 0x0062,
+		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
+		    mb[1], mb[2], mb[3]);
+
+		goto chip_diag_failed;
+	}
+	ha->product_id[0] = mb[1];
+	ha->product_id[1] = mb[2];
+	ha->product_id[2] = mb[3];
+	ha->product_id[3] = mb[4];
+
+	/* Adjust fw RISC transfer size */
+	if (req->length > 1024)
+		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
+	else
+		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
+		    req->length;
+
+	if (IS_QLA2200(ha) &&
+	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
+		/* Limit firmware transfer size with a 2200A */
+		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
+
+		ha->device_type |= DT_ISP2200A;
+		ha->fw_transfer_size = 128;
+	}
+
+	/* Wrap Incoming Mailboxes Test. */
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
+	rval = qla2x00_mbx_reg_test(vha);
+	if (rval)
+		ql_log(ql_log_warn, vha, 0x0080,
+		    "Failed mailbox send register test.\n");
+	else
+		/* Flag a successful rval */
+		rval = QLA_SUCCESS;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+chip_diag_failed:
+	if (rval)
+		ql_log(ql_log_info, vha, 0x0081,
+		    "Chip diagnostics **** FAILED ****.\n");
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return (rval);
+}
+
+/**
+ * qla24xx_chip_diag() - Test ISP24xx for proper operation.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla24xx_chip_diag(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	if (IS_P3P_TYPE(ha))
+		return QLA_SUCCESS;
+
+	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+	rval = qla2x00_mbx_reg_test(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0082,
+		    "Failed mailbox send register test.\n");
+	} else {
+		/* Flag a successful rval */
+		rval = QLA_SUCCESS;
+	}
+
+	return rval;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+	    eft_size, fce_size, mq_size;
+	dma_addr_t tc_dma;
+	void *tc;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	if (ha->fw_dump) {
+		ql_dbg(ql_dbg_init, vha, 0x00bd,
+		    "Firmware dump already allocated.\n");
+		return;
+	}
+
+	ha->fw_dumped = 0;
+	ha->fw_dump_cap_flags = 0;
+	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+	req_q_size = rsp_q_size = 0;
+
+	if (IS_QLA27XX(ha))
+		goto try_fce;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		fixed_size = sizeof(struct qla2100_fw_dump);
+	} else if (IS_QLA23XX(ha)) {
+		fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+		    sizeof(uint16_t);
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+		else if (IS_QLA81XX(ha))
+			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+		else if (IS_QLA25XX(ha))
+			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+		else
+			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+		    sizeof(uint32_t);
+		if (ha->mqenable) {
+			if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+				mq_size = sizeof(struct qla2xxx_mq_chain);
+			/*
+			 * Allocate maximum buffer size for all queues.
+			 * Resizing must be done at end-of-dump processing.
+			 */
+			mq_size += ha->max_req_queues *
+			    (req->length * sizeof(request_t));
+			mq_size += ha->max_rsp_queues *
+			    (rsp->length * sizeof(response_t));
+		}
+		if (ha->tgt.atio_ring)
+			mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+		/* Allocate memory for Fibre Channel Event Buffer. */
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+		    !IS_QLA27XX(ha))
+			goto try_eft;
+
+try_fce:
+		if (ha->fce)
+			dma_free_coherent(&ha->pdev->dev,
+			    FCE_SIZE, ha->fce, ha->fce_dma);
+
+		/* Allocate memory for Fibre Channel Event Buffer. */
+		tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+					 GFP_KERNEL);
+		if (!tc) {
+			ql_log(ql_log_warn, vha, 0x00be,
+			    "Unable to allocate (%d KB) for FCE.\n",
+			    FCE_SIZE / 1024);
+			goto try_eft;
+		}
+
+		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+		    ha->fce_mb, &ha->fce_bufs);
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0x00bf,
+			    "Unable to initialize FCE (%d).\n", rval);
+			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
+			    tc_dma);
+			ha->flags.fce_enabled = 0;
+			goto try_eft;
+		}
+		ql_dbg(ql_dbg_init, vha, 0x00c0,
+		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
+
+		fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+		ha->flags.fce_enabled = 1;
+		ha->fce_dma = tc_dma;
+		ha->fce = tc;
+
+try_eft:
+		if (ha->eft)
+			dma_free_coherent(&ha->pdev->dev,
+			    EFT_SIZE, ha->eft, ha->eft_dma);
+
+		/* Allocate memory for Extended Trace Buffer. */
+		tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+					 GFP_KERNEL);
+		if (!tc) {
+			ql_log(ql_log_warn, vha, 0x00c1,
+			    "Unable to allocate (%d KB) for EFT.\n",
+			    EFT_SIZE / 1024);
+			goto cont_alloc;
+		}
+
+		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0x00c2,
+			    "Unable to initialize EFT (%d).\n", rval);
+			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
+			    tc_dma);
+			goto cont_alloc;
+		}
+		ql_dbg(ql_dbg_init, vha, 0x00c3,
+		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+
+		eft_size = EFT_SIZE;
+		ha->eft_dma = tc_dma;
+		ha->eft = tc;
+	}
+
+cont_alloc:
+	if (IS_QLA27XX(ha)) {
+		if (!ha->fw_dump_template) {
+			ql_log(ql_log_warn, vha, 0x00ba,
+			    "Failed missing fwdump template\n");
+			return;
+		}
+		dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+		ql_dbg(ql_dbg_init, vha, 0x00fa,
+		    "-> allocating fwdump (%x bytes)...\n", dump_size);
+		goto allocate;
+	}
+
+	req_q_size = req->length * sizeof(request_t);
+	rsp_q_size = rsp->length * sizeof(response_t);
+	dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
+	ha->chain_offset = dump_size;
+	dump_size += mq_size + fce_size;
+
+	if (ha->exchoffld_buf)
+		dump_size += sizeof(struct qla2xxx_offld_chain) +
+			ha->exchoffld_size;
+	if (ha->exlogin_buf)
+		dump_size += sizeof(struct qla2xxx_offld_chain) +
+			ha->exlogin_size;
+
+allocate:
+	ha->fw_dump = vmalloc(dump_size);
+	if (!ha->fw_dump) {
+		ql_log(ql_log_warn, vha, 0x00c4,
+		    "Unable to allocate (%d KB) for firmware dump.\n",
+		    dump_size / 1024);
+
+		if (ha->fce) {
+			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+			    ha->fce_dma);
+			ha->fce = NULL;
+			ha->fce_dma = 0;
+		}
+
+		if (ha->eft) {
+			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
+			    ha->eft_dma);
+			ha->eft = NULL;
+			ha->eft_dma = 0;
+		}
+		return;
+	}
+	ha->fw_dump_len = dump_size;
+	ql_dbg(ql_dbg_init, vha, 0x00c5,
+	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
+
+	if (IS_QLA27XX(ha))
+		return;
+
+	ha->fw_dump->signature[0] = 'Q';
+	ha->fw_dump->signature[1] = 'L';
+	ha->fw_dump->signature[2] = 'G';
+	ha->fw_dump->signature[3] = 'C';
+	ha->fw_dump->version = htonl(1);
+
+	ha->fw_dump->fixed_size = htonl(fixed_size);
+	ha->fw_dump->mem_size = htonl(mem_size);
+	ha->fw_dump->req_q_size = htonl(req_q_size);
+	ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+	ha->fw_dump->eft_size = htonl(eft_size);
+	ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+	ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+	ha->fw_dump->header_size =
+	    htonl(offsetof(struct qla2xxx_fw_dump, isp));
+}
+
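+/*
+ * qla81xx_mpi_sync() - align the MPS field (mask 0xe0) of RAM word
+ * 0x7a15 with the value PCI config space reports at offset 0x54, under
+ * a semaphore taken and released through RAM word 0x7c00.
+ */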
+static int
+qla81xx_mpi_sync(scsi_qla_host_t *vha)
+{
+#define MPS_MASK	0xe0
+	int rval;
+	uint16_t dc;
+	uint32_t dw;
+
+	if (!IS_QLA81XX(vha->hw))
+		return QLA_SUCCESS;
+
+	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x0105,
+		    "Unable to acquire semaphore.\n");
+		goto done;
+	}
+
+	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
+	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
+		goto done_release;
+	}
+
+	dc &= MPS_MASK;
+	if (dc == (dw & MPS_MASK))
+		goto done_release;
+
+	dw &= ~MPS_MASK;
+	dw |= dc;
+	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
+	}
+
+done_release:
+	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x006d,
+		    "Unable to release semaphore.\n");
+	}
+
+done:
+	return rval;
+}
+
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+	/* Don't try to reallocate the array */
+	if (req->outstanding_cmds)
+		return QLA_SUCCESS;
+
+	if (!IS_FWI2_CAPABLE(ha))
+		req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+	else {
+		if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
+			req->num_outstanding_cmds = ha->cur_fw_xcb_count;
+		else
+			req->num_outstanding_cmds = ha->cur_fw_iocb_count;
+	}
+
+	req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
+	    sizeof(srb_t *), GFP_KERNEL);
+
+	if (!req->outstanding_cmds) {
+		/*
+		 * Try to allocate a minimal size just so we can get through
+		 * initialization.
+		 */
+		req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+		req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
+		    sizeof(srb_t *), GFP_KERNEL);
+
+		if (!req->outstanding_cmds) {
+			ql_log(ql_log_fatal, NULL, 0x0126,
+			    "Failed to allocate memory for "
+			    "outstanding_cmds for req_que %p.\n", req);
+			req->num_outstanding_cmds = 0;
+			return QLA_FUNCTION_FAILED;
+		}
+	}
+
+	return QLA_SUCCESS;
+}
+
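+/*
+ * PRINT_FIELD() - append @_str to the caller's buffer when @_flag is set
+ * in a0->@_field, inserting a "|" separator between consecutive matches.
+ * It relies on locals (a0, ptr, leftover, len, p) that
+ * qla2xxx_print_sfp_info() resets before each group of calls.
+ */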
+#define PRINT_FIELD(_field, _flag, _str) {		\
+	if (a0->_field & _flag) {\
+		if (p) {\
+			strcat(ptr, "|");\
+			ptr++;\
+			leftover--;\
+		} \
+		len = snprintf(ptr, leftover, "%s", _str);	\
+		p = 1;\
+		leftover -= len;\
+		ptr += len; \
+	} \
+}
+
+static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
+{
+#define STR_LEN 64
+	struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
+	u8 str[STR_LEN], *ptr, p;
+	int leftover, len;
+
+	memset(str, 0, STR_LEN);
+	snprintf(str, SFF_VEN_NAME_LEN + 1, "%s", a0->vendor_name);
+	ql_dbg(ql_dbg_init, vha, 0x015a,
+	    "SFP MFG Name: %s\n", str);
+
+	memset(str, 0, STR_LEN);
+	snprintf(str, SFF_PART_NAME_LEN + 1, "%s", a0->vendor_pn);
+	ql_dbg(ql_dbg_init, vha, 0x015c,
+	    "SFP Part Name: %s\n", str);
+
+	/* media */
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
+	PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
+	PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
+	PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
+	PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
+	PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
+	PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
+	ql_dbg(ql_dbg_init, vha, 0x0160,
+	    "SFP Media: %s\n", str);
+
+	/* link length */
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
+	ql_dbg(ql_dbg_init, vha, 0x0196,
+	    "SFP Link Length: %s\n", str);
+
+	memset(str, 0, STR_LEN);
+	ptr = str;
+	leftover = STR_LEN;
+	p = len = 0;
+	PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
+	PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
+	PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
+	ql_dbg(ql_dbg_init, vha, 0x016e,
+	    "SFP FC Link Tech: %s\n", str);
+
+	if (a0->length_km)
+		ql_dbg(ql_dbg_init, vha, 0x016f,
+		    "SFP Distance: %d km\n", a0->length_km);
+	if (a0->length_100m)
+		ql_dbg(ql_dbg_init, vha, 0x0170,
+		    "SFP Distance: %d m\n", a0->length_100m*100);
+	if (a0->length_50um_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0189,
+		    "SFP Distance (WL=50um): %d m\n", a0->length_50um_10m * 10);
+	if (a0->length_62um_10m)
+		ql_dbg(ql_dbg_init, vha, 0x018a,
+		    "SFP Distance (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
+	if (a0->length_om4_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0194,
+		    "SFP Distance (OM4): %d m\n", a0->length_om4_10m * 10);
+	if (a0->length_om3_10m)
+		ql_dbg(ql_dbg_init, vha, 0x0195,
+		    "SFP Distance (OM3): %d m\n", a0->length_om3_10m * 10);
+}
+
+/*
+ * Return Code:
+ *   QLA_SUCCESS: no action
+ *   QLA_INTERFACE_ERROR: SFP is not present.
+ *   QLA_FUNCTION_FAILED: detected a new SFP
+ */
+int
+qla24xx_detect_sfp(scsi_qla_host_t *vha)
+{
+	int rc = QLA_SUCCESS;
+	struct sff_8247_a0 *a;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!AUTO_DETECT_SFP_SUPPORT(vha))
+		goto out;
+
+	rc = qla2x00_read_sfp_dev(vha, NULL, 0);
+	if (rc)
+		goto out;
+
+	a = (struct sff_8247_a0 *)vha->hw->sfp_data;
+	qla2xxx_print_sfp_info(vha);
+
+	if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
+		/* long range */
+		ha->flags.detected_lr_sfp = 1;
+
+		if (a->length_km > 5 || a->length_100m > 50)
+			ha->long_range_distance = LR_DISTANCE_10K;
+		else
+			ha->long_range_distance = LR_DISTANCE_5K;
+
+		if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
+			ql_dbg(ql_dbg_async, vha, 0x507b,
+			    "Detected Long Range SFP.\n");
+	} else {
+		/* short range */
+		ha->flags.detected_lr_sfp = 0;
+		if (ha->flags.using_lr_setting)
+			ql_dbg(ql_dbg_async, vha, 0x5084,
+			    "Detected Short Range SFP.\n");
+	}
+
+	if (!vha->flags.init_done)
+		rc = QLA_SUCCESS;
+out:
+	return rc;
+}
+
+/**
+ * qla2x00_setup_chip() - Load and start RISC firmware.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_setup_chip(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint32_t srisc_address = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	unsigned long flags;
+	uint16_t fw_major_version;
+
+	if (IS_P3P_TYPE(ha)) {
+		rval = ha->isp_ops->load_risc(vha, &srisc_address);
+		if (rval == QLA_SUCCESS) {
+			qla2x00_stop_firmware(vha);
+			goto enable_82xx_npiv;
+		} else
+			goto failed;
+	}
+
+	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
+		/* Disable SRAM, Instruction RAM and GP RAM parity.  */
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+		RD_REG_WORD(&reg->hccr);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	qla81xx_mpi_sync(vha);
+
+	/* Load firmware sequences */
+	rval = ha->isp_ops->load_risc(vha, &srisc_address);
+	if (rval == QLA_SUCCESS) {
+		ql_dbg(ql_dbg_init, vha, 0x00c9,
+		    "Verifying Checksum of loaded RISC code.\n");
+
+		rval = qla2x00_verify_checksum(vha, srisc_address);
+		if (rval == QLA_SUCCESS) {
+			/* Start firmware execution. */
+			ql_dbg(ql_dbg_init, vha, 0x00ca,
+			    "Starting firmware.\n");
+
+			if (ql2xexlogins)
+				ha->flags.exlogins_enabled = 1;
+
+			if (qla_is_exch_offld_enabled(vha))
+				ha->flags.exchoffld_enabled = 1;
+
+			rval = qla2x00_execute_fw(vha, srisc_address);
+			/* Retrieve firmware information. */
+			if (rval == QLA_SUCCESS) {
+				qla24xx_detect_sfp(vha);
+
+				rval = qla2x00_set_exlogins_buffer(vha);
+				if (rval != QLA_SUCCESS)
+					goto failed;
+
+				rval = qla2x00_set_exchoffld_buffer(vha);
+				if (rval != QLA_SUCCESS)
+					goto failed;
+
+enable_82xx_npiv:
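+				/*
+				 * Save the pre-update major version; a value
+				 * of zero below means this is the first
+				 * firmware start, so the dump buffer still
+				 * needs allocating.
+				 */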
+				fw_major_version = ha->fw_major_version;
+				if (IS_P3P_TYPE(ha))
+					qla82xx_check_md_needed(vha);
+				else
+					rval = qla2x00_get_fw_version(vha);
+				if (rval != QLA_SUCCESS)
+					goto failed;
+				ha->flags.npiv_supported = 0;
+				if (IS_QLA2XXX_MIDTYPE(ha) &&
+					 (ha->fw_attributes & BIT_2)) {
+					ha->flags.npiv_supported = 1;
+					if ((!ha->max_npiv_vports) ||
+					    ((ha->max_npiv_vports + 1) %
+					    MIN_MULTI_ID_FABRIC))
+						ha->max_npiv_vports =
+						    MIN_MULTI_ID_FABRIC - 1;
+				}
+				qla2x00_get_resource_cnts(vha);
+
+				/*
+				 * Allocate the array of outstanding commands
+				 * now that we know the firmware resources.
+				 */
+				rval = qla2x00_alloc_outstanding_cmds(ha,
+				    vha->req);
+				if (rval != QLA_SUCCESS)
+					goto failed;
+
+				if (!fw_major_version && ql2xallocfwdump
+				    && !(IS_P3P_TYPE(ha)))
+					qla2x00_alloc_fw_dump(vha);
+			} else {
+				goto failed;
+			}
+		} else {
+			ql_log(ql_log_fatal, vha, 0x00cd,
+			    "ISP Firmware failed checksum.\n");
+			goto failed;
+		}
+	} else
+		goto failed;
+
+	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
+		/* Enable proper parity. */
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		if (IS_QLA2300(ha))
+			/* SRAM parity */
+			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+		else
+			/* SRAM, Instruction RAM and GP RAM parity */
+			WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+		RD_REG_WORD(&reg->hccr);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	if (IS_QLA27XX(ha))
+		ha->flags.fac_supported = 1;
+	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
+		uint32_t size;
+
+		rval = qla81xx_fac_get_sector_size(vha, &size);
+		if (rval == QLA_SUCCESS) {
+			ha->flags.fac_supported = 1;
+			ha->fdt_block_size = size << 2;
+		} else {
+			ql_log(ql_log_warn, vha, 0x00ce,
+			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
+			    ha->fw_major_version, ha->fw_minor_version,
+			    ha->fw_subminor_version);
+
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+				ha->flags.fac_supported = 0;
+				rval = QLA_SUCCESS;
+			}
+		}
+	}
+failed:
+	if (rval) {
+		ql_log(ql_log_fatal, vha, 0x00cf,
+		    "Setup chip ****FAILED****.\n");
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_init_response_q_entries() - Initializes response queue entries.
+ * @rsp: response queue
+ *
+ * Marks every entry in the response ring as already processed so that
+ * stale entries are not handled after a (re)initialization.
+ */
+void
+qla2x00_init_response_q_entries(struct rsp_que *rsp)
+{
+	uint16_t cnt;
+	response_t *pkt;
+
+	rsp->ring_ptr = rsp->ring;
+	rsp->ring_index    = 0;
+	rsp->status_srb = NULL;
+	pkt = rsp->ring_ptr;
+	for (cnt = 0; cnt < rsp->length; cnt++) {
+		pkt->signature = RESPONSE_PROCESSED;
+		pkt++;
+	}
+}
+
+/**
+ * qla2x00_update_fw_options() - Read and process firmware options.
+ * @vha: HA context
+ */
+void
+qla2x00_update_fw_options(scsi_qla_host_t *vha)
+{
+	uint16_t swing, emphasis, tx_sens, rx_sens;
+	struct qla_hw_data *ha = vha->hw;
+
+	memset(ha->fw_options, 0, sizeof(ha->fw_options));
+	qla2x00_get_fw_options(vha, ha->fw_options);
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return;
+
+	/* Serial Link options. */
+	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
+	    "Serial link options.\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
+	    (uint8_t *)&ha->fw_seriallink_options,
+	    sizeof(ha->fw_seriallink_options));
+
+	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
+	if (ha->fw_seriallink_options[3] & BIT_2) {
+		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
+
+		/*  1G settings */
+		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
+		emphasis = (ha->fw_seriallink_options[2] &
+		    (BIT_4 | BIT_3)) >> 3;
+		tx_sens = ha->fw_seriallink_options[0] &
+		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		rx_sens = (ha->fw_seriallink_options[0] &
+		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
+		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
+		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
+			if (rx_sens == 0x0)
+				rx_sens = 0x3;
+			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
+		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
+			ha->fw_options[10] |= BIT_5 |
+			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
+			    (tx_sens & (BIT_1 | BIT_0));
+
+		/*  2G settings */
+		swing = (ha->fw_seriallink_options[2] &
+		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
+		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
+		tx_sens = ha->fw_seriallink_options[1] &
+		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		rx_sens = (ha->fw_seriallink_options[1] &
+		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
+		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
+		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
+			if (rx_sens == 0x0)
+				rx_sens = 0x3;
+			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
+		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
+			ha->fw_options[11] |= BIT_5 |
+			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
+			    (tx_sens & (BIT_1 | BIT_0));
+	}
+
+	/* FCP2 options. */
+	/*  Return command IOCBs without waiting for an ABTS to complete. */
+	ha->fw_options[3] |= BIT_13;
+
+	/* LED scheme. */
+	if (ha->flags.enable_led_scheme)
+		ha->fw_options[2] |= BIT_12;
+
+	/* Detect ISP6312. */
+	if (IS_QLA6312(ha))
+		ha->fw_options[2] |= BIT_13;
+
+	/* Set Retry FLOGI in case of P2P connection */
+	if (ha->operating_mode == P2P) {
+		ha->fw_options[2] |= BIT_3;
+		ql_dbg(ql_dbg_disc, vha, 0x2100,
+		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
+			__func__, ha->fw_options[2]);
+	}
+
+	/* Update firmware options. */
+	qla2x00_set_fw_options(vha, ha->fw_options);
+}
+
+void
+qla24xx_update_fw_options(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_P3P_TYPE(ha))
+		return;
+
+	/*  Hold status IOCBs until ABTS response received. */
+	if (ql2xfwholdabts)
+		ha->fw_options[3] |= BIT_12;
+
+	/* Set Retry FLOGI in case of P2P connection */
+	if (ha->operating_mode == P2P) {
+		ha->fw_options[2] |= BIT_3;
+		ql_dbg(ql_dbg_disc, vha, 0x2101,
+		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
+			__func__, ha->fw_options[2]);
+	}
+
+	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
+	if (ql2xmvasynctoatio &&
+	    (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+		if (qla_tgt_mode_enabled(vha) ||
+		    qla_dual_mode_enabled(vha))
+			ha->fw_options[2] |= BIT_11;
+		else
+			ha->fw_options[2] &= ~BIT_11;
+	}
+
+	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		/*
+		 * Tell FW to track each exchange to prevent
+		 * driver from using stale exchange.
+		 */
+		if (qla_tgt_mode_enabled(vha) ||
+		    qla_dual_mode_enabled(vha))
+			ha->fw_options[2] |= BIT_4;
+		else
+			ha->fw_options[2] &= ~BIT_4;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x00e8,
+	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+	    __func__, ha->fw_options[1], ha->fw_options[2],
+	    ha->fw_options[3], vha->host->active_mode);
+
+	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
+		qla2x00_set_fw_options(vha, ha->fw_options);
+
+	/* Update Serial Link options. */
+	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
+		return;
+
+	rval = qla2x00_set_serdes_params(vha,
+	    le16_to_cpu(ha->fw_seriallink_options24[1]),
+	    le16_to_cpu(ha->fw_seriallink_options24[2]),
+	    le16_to_cpu(ha->fw_seriallink_options24[3]));
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x0104,
+		    "Unable to update Serial Link options (%x).\n", rval);
+	}
+}
+
+void
+qla2x00_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	/* Setup ring parameters in initialization control block. */
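+	/* 64-bit ring DMA addresses are split into low/high 32-bit
+	 * little-endian words via LSD()/MSD(). */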
+	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
+	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
+	ha->init_cb->request_q_length = cpu_to_le16(req->length);
+	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
+	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+	ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+	ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+	ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
+	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
+	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
+	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
+	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
+}
+
+void
+qla24xx_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t *reg = ISP_QUE_REG(ha, 0);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+	struct qla_msix_entry *msix;
+	struct init_cb_24xx *icb;
+	uint16_t rid = 0;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	/* Setup ring parameters in initialization control block. */
+	icb = (struct init_cb_24xx *)ha->init_cb;
+	icb->request_q_outpointer = cpu_to_le16(0);
+	icb->response_q_inpointer = cpu_to_le16(0);
+	icb->request_q_length = cpu_to_le16(req->length);
+	icb->response_q_length = cpu_to_le16(rsp->length);
+	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+	/* Setup ATIO queue dma pointers for target mode */
+	icb->atio_q_inpointer = cpu_to_le16(0);
+	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
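+	/* BIT_29/BIT_30 request in-memory shadow copies of the queue
+	 * in/out pointers on adapters that support them. */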
+	if (IS_SHADOW_REG_CAPABLE(ha))
+		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
+
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+		icb->rid = cpu_to_le16(rid);
+		if (ha->flags.msix_enabled) {
+			msix = &ha->msix_entries[1];
+			ql_dbg(ql_dbg_init, vha, 0x0019,
+			    "Registering vector 0x%x for base que.\n",
+			    msix->entry);
+			icb->msix = cpu_to_le16(msix->entry);
+		}
+		/* Use alternate PCI bus number */
+		if (MSB(rid))
+			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
+		/* Use alternate PCI devfn */
+		if (LSB(rid))
+			icb->firmware_options_2 |= cpu_to_le32(BIT_18);
+
+		/* Use Disable MSIX Handshake mode for capable adapters */
+		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
+		    (ha->flags.msix_enabled)) {
+			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
+			ha->flags.disable_msix_handshake = 1;
+			ql_dbg(ql_dbg_init, vha, 0x00fe,
+			    "MSIX Handshake Disable Mode turned on.\n");
+		} else {
+			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
+		}
+		icb->firmware_options_2 |= cpu_to_le32(BIT_23);
+
+		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
+		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
+		WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
+		WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+	} else {
+		WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
+		WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
+		WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
+		WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+	}
+	qlt_24xx_config_rings(vha);
+
+	/* PCI posting */
+	RD_REG_DWORD(&ioreg->hccr);
+}
+
+/**
+ * qla2x00_init_rings() - Initializes firmware.
+ * @vha: HA context
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
+ */
+int
+qla2x00_init_rings(scsi_qla_host_t *vha)
+{
+	int	rval;
+	unsigned long flags = 0;
+	int cnt, que;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+	struct rsp_que *rsp;
+	struct mid_init_cb_24xx *mid_init_cb =
+	    (struct mid_init_cb_24xx *) ha->init_cb;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Clear outstanding commands array. */
+	for (que = 0; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req || !test_bit(que, ha->req_qid_map))
+			continue;
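+		/* The out-pointer shadow lives in the slot immediately
+		 * after the last ring entry. */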
+		req->out_ptr = (void *)(req->ring + req->length);
+		*req->out_ptr = 0;
+		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
+			req->outstanding_cmds[cnt] = NULL;
+
+		req->current_outstanding_cmd = 1;
+
+		/* Initialize firmware. */
+		req->ring_ptr  = req->ring;
+		req->ring_index    = 0;
+		req->cnt      = req->length;
+	}
+
+	for (que = 0; que < ha->max_rsp_queues; que++) {
+		rsp = ha->rsp_q_map[que];
+		if (!rsp || !test_bit(que, ha->rsp_qid_map))
+			continue;
+		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+		*rsp->in_ptr = 0;
+		/* Initialize response queue entries */
+		if (IS_QLAFX00(ha))
+			qlafx00_init_response_q_entries(rsp);
+		else
+			qla2x00_init_response_q_entries(rsp);
+	}
+
+	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+	ha->tgt.atio_ring_index = 0;
+	/* Initialize ATIO queue entries */
+	qlt_init_atio_q_entries(vha);
+
+	ha->isp_ops->config_rings(vha);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
+
+	if (IS_QLAFX00(ha)) {
+		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
+		goto next_check;
+	}
+
+	/* Update any ISP specific firmware options before initialization. */
+	ha->isp_ops->update_fw_options(vha);
+
+	if (ha->flags.npiv_supported) {
+		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
+			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
+		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
+	}
+
+	if (IS_FWI2_CAPABLE(ha)) {
+		mid_init_cb->options = cpu_to_le16(BIT_1);
+		mid_init_cb->init_cb.execution_throttle =
+		    cpu_to_le16(ha->cur_fw_xcb_count);
+		ha->flags.dport_enabled =
+		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
+		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
+		/* FA-WWPN Status */
+		ha->flags.fawwpn_enabled =
+		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
+		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
+		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+	}
+
+	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+next_check:
+	if (rval) {
+		ql_log(ql_log_fatal, vha, 0x00d2,
+		    "Init Firmware **** FAILED ****.\n");
+	} else {
+		ql_dbg(ql_dbg_init, vha, 0x00d3,
+		    "Init Firmware -- success.\n");
+		QLA_FW_STARTED(ha);
+	}
+
+	return (rval);
+}
+
+/**
+ * qla2x00_fw_ready() - Waits for firmware ready.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fw_ready(scsi_qla_host_t *vha)
+{
+	int		rval;
+	unsigned long	wtime, mtime, cs84xx_time;
+	uint16_t	min_wait;	/* Minimum wait time if loop is down */
+	uint16_t	wait_time;	/* Wait time if loop is coming ready */
+	uint16_t	state[6];
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLAFX00(vha->hw))
+		return qlafx00_fw_ready(vha);
+
+	rval = QLA_SUCCESS;
+
+	/* Time to wait for loop down */
+	if (IS_P3P_TYPE(ha))
+		min_wait = 30;
+	else
+		min_wait = 20;
+
+	/*
+	 * Firmware should take at most one RATOV to login, plus 5 seconds for
+	 * our own processing.
+	 */
+	wait_time = (ha->retry_count * ha->login_timeout) + 5;
+	if (wait_time < min_wait)
+		wait_time = min_wait;
+
+	/* Min wait time if loop down */
+	mtime = jiffies + (min_wait * HZ);
+
+	/* wait time before firmware ready */
+	wtime = jiffies + (wait_time * HZ);
+
+	/* Wait for ISP to finish LIP */
+	if (!vha->flags.init_done)
+		ql_log(ql_log_info, vha, 0x801e,
+		    "Waiting for LIP to complete.\n");
+
+	do {
+		memset(state, -1, sizeof(state));
+		rval = qla2x00_get_firmware_state(vha, state);
+		if (rval == QLA_SUCCESS) {
+			if (state[0] < FSTATE_LOSS_OF_SYNC) {
+				vha->device_flags &= ~DFLG_NO_CABLE;
+			}
+			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
+				ql_dbg(ql_dbg_taskm, vha, 0x801f,
+				    "fw_state=%x 84xx=%x.\n", state[0],
+				    state[2]);
+				if ((state[2] & FSTATE_LOGGED_IN) &&
+				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
+					ql_dbg(ql_dbg_taskm, vha, 0x8028,
+					    "Sending verify iocb.\n");
+
+					cs84xx_time = jiffies;
+					rval = qla84xx_init_chip(vha);
+					if (rval != QLA_SUCCESS) {
+						ql_log(ql_log_warn,
+						    vha, 0x8007,
+						    "Init chip failed.\n");
+						break;
+					}
+
+					/* Add time taken to initialize. */
+					cs84xx_time = jiffies - cs84xx_time;
+					wtime += cs84xx_time;
+					mtime += cs84xx_time;
+					ql_dbg(ql_dbg_taskm, vha, 0x8008,
+					    "Increasing wait time by %ld. "
+					    "New time %ld.\n", cs84xx_time,
+					    wtime);
+				}
+			} else if (state[0] == FSTATE_READY) {
+				ql_dbg(ql_dbg_taskm, vha, 0x8037,
+				    "F/W Ready - OK.\n");
+
+				qla2x00_get_retry_cnt(vha, &ha->retry_count,
+				    &ha->login_timeout, &ha->r_a_tov);
+
+				rval = QLA_SUCCESS;
+				break;
+			}
+
+			rval = QLA_FUNCTION_FAILED;
+
+			if (atomic_read(&vha->loop_down_timer) &&
+			    state[0] != FSTATE_READY) {
+				/* Loop down. Timeout on min_wait for states
+				 * other than Wait for Login.
+				 */
+				if (time_after_eq(jiffies, mtime)) {
+					ql_log(ql_log_info, vha, 0x8038,
+					    "Cable is unplugged...\n");
+
+					vha->device_flags |= DFLG_NO_CABLE;
+					break;
+				}
+			}
+		} else {
+			/* Mailbox cmd failed. Timeout on min_wait. */
+			if (time_after_eq(jiffies, mtime) ||
+				ha->flags.isp82xx_fw_hung)
+				break;
+		}
+
+		if (time_after_eq(jiffies, wtime))
+			break;
+
+		/* Delay for a while */
+		msleep(500);
+	} while (1);
+
+	ql_dbg(ql_dbg_taskm, vha, 0x803a,
+	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
+	    state[1], state[2], state[3], state[4], state[5], jiffies);
+
+	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
+		ql_log(ql_log_warn, vha, 0x803b,
+		    "Firmware ready **** FAILED ****.\n");
+	}
+
+	return (rval);
+}
+
+/*
+*  qla2x00_configure_hba
+*      Setup adapter context.
+*
+* Input:
+*      vha = adapter state pointer.
+*
+* Returns:
+*      0 = success
+*
+* Context:
+*      Kernel context.
+*/
+static int
+qla2x00_configure_hba(scsi_qla_host_t *vha)
+{
+	int       rval;
+	uint16_t      loop_id;
+	uint16_t      topo;
+	uint16_t      sw_cap;
+	uint8_t       al_pa;
+	uint8_t       area;
+	uint8_t       domain;
+	char		connect_type[22];
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	port_id_t id;
+
+	/* Get host addresses. */
+	rval = qla2x00_get_adapter_id(vha,
+	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
+	if (rval != QLA_SUCCESS) {
+		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
+		    IS_CNA_CAPABLE(ha) ||
+		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
+			ql_dbg(ql_dbg_disc, vha, 0x2008,
+			    "Loop is in a transition state.\n");
+		} else {
+			ql_log(ql_log_warn, vha, 0x2009,
+			    "Unable to get host loop ID.\n");
+			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+				ql_log(ql_log_warn, vha, 0x1151,
+				    "Doing link init.\n");
+				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+					return rval;
+			}
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		}
+		return (rval);
+	}
+
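+	/* A topology value of 4 means the firmware has not resolved the
+	 * link topology yet; fail so the caller retries. */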
+	if (topo == 4) {
+		ql_log(ql_log_info, vha, 0x200a,
+		    "Cannot get topology - retrying.\n");
+		return (QLA_FUNCTION_FAILED);
+	}
+
+	vha->loop_id = loop_id;
+
+	/* initialize */
+	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
+	ha->operating_mode = LOOP;
+	ha->switch_cap = 0;
+
+	switch (topo) {
+	case 0:
+		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
+		ha->current_topology = ISP_CFG_NL;
+		strcpy(connect_type, "(Loop)");
+		break;
+
+	case 1:
+		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
+		ha->switch_cap = sw_cap;
+		ha->current_topology = ISP_CFG_FL;
+		strcpy(connect_type, "(FL_Port)");
+		break;
+
+	case 2:
+		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
+		ha->operating_mode = P2P;
+		ha->current_topology = ISP_CFG_N;
+		strcpy(connect_type, "(N_Port-to-N_Port)");
+		break;
+
+	case 3:
+		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
+		ha->switch_cap = sw_cap;
+		ha->operating_mode = P2P;
+		ha->current_topology = ISP_CFG_F;
+		strcpy(connect_type, "(F_Port)");
+		break;
+
+	default:
+		ql_dbg(ql_dbg_disc, vha, 0x200f,
+		    "HBA in unknown topology %x, using NL.\n", topo);
+		ha->current_topology = ISP_CFG_NL;
+		strcpy(connect_type, "(Loop)");
+		break;
+	}
+
+	/* Save Host port and loop ID. */
+	/* byte order - Big Endian */
+	id.b.domain = domain;
+	id.b.area = area;
+	id.b.al_pa = al_pa;
+	id.b.rsvd_1 = 0;
+	qlt_update_host_map(vha, id);
+
+	if (!vha->flags.init_done)
+		ql_log(ql_log_info, vha, 0x2010,
+		    "Topology - %s, Host Loop address 0x%x.\n",
+		    connect_type, vha->loop_id);
+
+	return(rval);
+}
+
+inline void
+qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+	char *def)
+{
+	char *st, *en;
+	uint16_t index;
+	struct qla_hw_data *ha = vha->hw;
+	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
+
+	if (memcmp(model, BINZERO, len) != 0) {
+		strncpy(ha->model_number, model, len);
+		st = en = ha->model_number;
+		en += len - 1;
+		while (en > st) {
+			if (*en != 0x20 && *en != 0x00)
+				break;
+			*en-- = '\0';
+		}
+
+		index = (ha->pdev->subsystem_device & 0xff);
+		if (use_tbl &&
+		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+		    index < QLA_MODEL_NAMES)
+			strncpy(ha->model_desc,
+			    qla2x00_model_name[index * 2 + 1],
+			    sizeof(ha->model_desc) - 1);
+	} else {
+		index = (ha->pdev->subsystem_device & 0xff);
+		if (use_tbl &&
+		    ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+		    index < QLA_MODEL_NAMES) {
+			strcpy(ha->model_number,
+			    qla2x00_model_name[index * 2]);
+			strncpy(ha->model_desc,
+			    qla2x00_model_name[index * 2 + 1],
+			    sizeof(ha->model_desc) - 1);
+		} else {
+			strcpy(ha->model_number, def);
+		}
+	}
+	if (IS_FWI2_CAPABLE(ha))
+		qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
+		    sizeof(ha->model_desc));
+}
+
+/* On sparc systems, obtain port and node WWN from firmware
+ * properties.
+ */
+static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
+{
+#ifdef CONFIG_SPARC
+	struct qla_hw_data *ha = vha->hw;
+	struct pci_dev *pdev = ha->pdev;
+	struct device_node *dp = pci_device_to_OF_node(pdev);
+	const u8 *val;
+	int len;
+
+	val = of_get_property(dp, "port-wwn", &len);
+	if (val && len >= WWN_SIZE)
+		memcpy(nv->port_name, val, WWN_SIZE);
+
+	val = of_get_property(dp, "node-wwn", &len);
+	if (val && len >= WWN_SIZE)
+		memcpy(nv->node_name, val, WWN_SIZE);
+#endif
+}
+
+/*
+* NVRAM configuration for ISP 2xxx
+*
+* Input:
+*      vha               = adapter block pointer.
+*
+* Output:
+*      initialization control block in response_ring
+*      host adapters parameters in host adapter block
+*
+* Returns:
+*      0 = success.
+*/
+int
+qla2x00_nvram_config(scsi_qla_host_t *vha)
+{
+	int             rval;
+	uint8_t         chksum = 0;
+	uint16_t        cnt;
+	uint8_t         *dptr1, *dptr2;
+	struct qla_hw_data *ha = vha->hw;
+	init_cb_t       *icb = ha->init_cb;
+	nvram_t         *nv = ha->nvram;
+	uint8_t         *ptr = ha->nvram;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	rval = QLA_SUCCESS;
+
+	/* Determine NVRAM starting address. */
+	ha->nvram_size = sizeof(nvram_t);
+	ha->nvram_base = 0;
+	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
+		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+			ha->nvram_base = 0x80;
+
+	/* Get NVRAM data and calculate checksum. */
+	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
+	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
+		chksum += *ptr++;
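+	/* A valid image sums (byte-wise, mod 256) to zero; any other
+	 * result is treated as corrupt NVRAM below. */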
+
+	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
+	    "Contents of NVRAM.\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
+	    (uint8_t *)nv, ha->nvram_size);
+
+	/* Bad NVRAM data, set default parameters. */
+	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
+	    nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
+		/* Reset NVRAM data. */
+		ql_log(ql_log_warn, vha, 0x0064,
+		    "Inconsistent NVRAM "
+		    "detected: checksum=0x%x id=%c version=0x%x.\n",
+		    chksum, nv->id[0], nv->nvram_version);
+		ql_log(ql_log_warn, vha, 0x0065,
+		    "Falling back to "
+		    "functioning (yet invalid -- WWPN) defaults.\n");
+
+		/*
+		 * Set default initialization control block.
+		 */
+		memset(nv, 0, ha->nvram_size);
+		nv->parameter_block_version = ICB_VERSION;
+
+		if (IS_QLA23XX(ha)) {
+			nv->firmware_options[0] = BIT_2 | BIT_1;
+			nv->firmware_options[1] = BIT_7 | BIT_5;
+			nv->add_firmware_options[0] = BIT_5;
+			nv->add_firmware_options[1] = BIT_5 | BIT_4;
+			nv->frame_payload_size = 2048;
+			nv->special_options[1] = BIT_7;
+		} else if (IS_QLA2200(ha)) {
+			nv->firmware_options[0] = BIT_2 | BIT_1;
+			nv->firmware_options[1] = BIT_7 | BIT_5;
+			nv->add_firmware_options[0] = BIT_5;
+			nv->add_firmware_options[1] = BIT_5 | BIT_4;
+			nv->frame_payload_size = 1024;
+		} else if (IS_QLA2100(ha)) {
+			nv->firmware_options[0] = BIT_3 | BIT_1;
+			nv->firmware_options[1] = BIT_5;
+			nv->frame_payload_size = 1024;
+		}
+
+		nv->max_iocb_allocation = cpu_to_le16(256);
+		nv->execution_throttle = cpu_to_le16(16);
+		nv->retry_count = 8;
+		nv->retry_delay = 1;
+
+		nv->port_name[0] = 33;
+		nv->port_name[3] = 224;
+		nv->port_name[4] = 139;
+
+		qla2xxx_nvram_wwn_from_ofw(vha, nv);
+
+		nv->login_timeout = 4;
+
+		/*
+		 * Set default host adapter parameters
+		 */
+		nv->host_p[1] = BIT_2;
+		nv->reset_delay = 5;
+		nv->port_down_retry_count = 8;
+		nv->max_luns_per_target = cpu_to_le16(8);
+		nv->link_down_timeout = 60;
+
+		rval = 1;
+	}
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
+	/*
+	 * The SN2 does not provide BIOS emulation which means you can't change
+	 * potentially bogus BIOS settings. Force the use of default settings
+	 * for link rate and frame size.  Hope that the rest of the settings
+	 * are valid.
+	 */
+	if (ia64_platform_is("sn2")) {
+		nv->frame_payload_size = 2048;
+		if (IS_QLA23XX(ha))
+			nv->special_options[1] = BIT_7;
+	}
+#endif
+
+	/* Reset Initialization control block */
+	memset(icb, 0, ha->init_cb_size);
+
+	/*
+	 * Setup driver NVRAM options.
+	 */
+	nv->firmware_options[0] |= (BIT_6 | BIT_1);
+	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
+	nv->firmware_options[1] |= (BIT_5 | BIT_0);
+	nv->firmware_options[1] &= ~BIT_4;
+
+	if (IS_QLA23XX(ha)) {
+		nv->firmware_options[0] |= BIT_2;
+		nv->firmware_options[0] &= ~BIT_3;
+		nv->special_options[0] &= ~BIT_6;
+		nv->add_firmware_options[1] |= BIT_5 | BIT_4;
+
+		if (IS_QLA2300(ha)) {
+			if (ha->fb_rev == FPM_2310) {
+				strcpy(ha->model_number, "QLA2310");
+			} else {
+				strcpy(ha->model_number, "QLA2300");
+			}
+		} else {
+			qla2x00_set_model_info(vha, nv->model_number,
+			    sizeof(nv->model_number), "QLA23xx");
+		}
+	} else if (IS_QLA2200(ha)) {
+		nv->firmware_options[0] |= BIT_2;
+		/*
+		 * 'Point-to-point preferred, else loop' is not a safe
+		 * connection mode setting.
+		 */
+		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
+		    (BIT_5 | BIT_4)) {
+			/* Force 'loop preferred, else point-to-point'. */
+			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
+			nv->add_firmware_options[0] |= BIT_5;
+		}
+		strcpy(ha->model_number, "QLA22xx");
+	} else /*if (IS_QLA2100(ha))*/ {
+		strcpy(ha->model_number, "QLA2100");
+	}
+
+	/*
+	 * Copy over NVRAM RISC parameter block to initialization control block.
+	 */
+	dptr1 = (uint8_t *)icb;
+	dptr2 = (uint8_t *)&nv->parameter_block_version;
+	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	/* Copy 2nd half. */
+	dptr1 = (uint8_t *)icb->add_firmware_options;
+	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	/* Use alternate WWN? */
+	if (nv->host_p[1] & BIT_7) {
+		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+	}
+
+	/* Prepare nodename */
+	if ((icb->firmware_options[1] & BIT_6) == 0) {
+		/*
+		 * Firmware will apply the following mask if the nodename was
+		 * not provided.
+		 */
+		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+		icb->node_name[0] &= 0xF0;
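+		/* e.g. port name 21:00:xx:... yields node name 20:00:xx:... */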
+	}
+
+	/*
+	 * Set host adapter parameters.
+	 */
+
+	/*
+	 * BIT_7 in the host-parameters section allows for modification to
+	 * internal driver logging.
+	 */
+	if (nv->host_p[0] & BIT_7)
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
+	/* Always load RISC code on non ISP2[12]00 chips. */
+	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
+		ha->flags.disable_risc_code_load = 0;
+	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
+	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
+	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
+	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
+	ha->flags.disable_serdes = 0;
+
+	ha->operating_mode =
+	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
+	    sizeof(ha->fw_seriallink_options));
+
+	/* save HBA serial number */
+	ha->serial0 = icb->port_name[5];
+	ha->serial1 = icb->port_name[6];
+	ha->serial2 = icb->port_name[7];
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+	icb->execution_throttle = cpu_to_le16(0xFFFF);
+
+	ha->retry_count = nv->retry_count;
+
+	/* Set minimum login_timeout to 4 seconds. */
+	if (nv->login_timeout != ql2xlogintimeout)
+		nv->login_timeout = ql2xlogintimeout;
+	if (nv->login_timeout < 4)
+		nv->login_timeout = 4;
+	ha->login_timeout = nv->login_timeout;
+
+	/* Set minimum RATOV to 100 tenths of a second. */
+	ha->r_a_tov = 100;
+
+	ha->loop_reset_delay = nv->reset_delay;
+
+	/* Link Down Timeout = 0:
+	 *
+	 * 	When Port Down timer expires we will start returning
+	 *	I/O's to OS with "DID_NO_CONNECT".
+	 *
+	 * Link Down Timeout != 0:
+	 *
+	 *	 The driver waits for the link to come up after link down
+	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
+	 */
+	if (nv->link_down_timeout == 0) {
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+	} else {
+		ha->link_down_timeout =	 nv->link_down_timeout;
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - ha->link_down_timeout);
+	}
+
+	/*
+	 * Need enough time to try and get the port back.
+	 */
+	ha->port_down_retry_count = nv->port_down_retry_count;
+	if (qlport_down_retry)
+		ha->port_down_retry_count = qlport_down_retry;
+	/* Set login_retry_count */
+	ha->login_retry_count  = nv->retry_count;
+	if (ha->port_down_retry_count == nv->port_down_retry_count &&
+	    ha->port_down_retry_count > 3)
+		ha->login_retry_count = ha->port_down_retry_count;
+	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+		ha->login_retry_count = ha->port_down_retry_count;
+	if (ql2xloginretrycount)
+		ha->login_retry_count = ql2xloginretrycount;
+
+	icb->lun_enables = cpu_to_le16(0);
+	icb->command_resource_count = 0;
+	icb->immediate_notify_resource_count = 0;
+	icb->timeout = cpu_to_le16(0);
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		/* Enable RIO */
+		icb->firmware_options[0] &= ~BIT_3;
+		icb->add_firmware_options[0] &=
+		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		icb->add_firmware_options[0] |= BIT_2;
+		icb->response_accumulation_timer = 3;
+		icb->interrupt_delay_timer = 5;
+
+		vha->flags.process_response_queue = 1;
+	} else {
+		/* Enable ZIO. */
+		if (!vha->flags.init_done) {
+			ha->zio_mode = icb->add_firmware_options[0] &
+			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+			ha->zio_timer = icb->interrupt_delay_timer ?
+			    icb->interrupt_delay_timer : 2;
+		}
+		icb->add_firmware_options[0] &=
+		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		vha->flags.process_response_queue = 0;
+		if (ha->zio_mode != QLA_ZIO_DISABLED) {
+			ha->zio_mode = QLA_ZIO_MODE_6;
+
+			ql_log(ql_log_info, vha, 0x0068,
+			    "ZIO mode %d enabled; timer delay (%d us).\n",
+			    ha->zio_mode, ha->zio_timer * 100);
+
+			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
+			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
+			vha->flags.process_response_queue = 1;
+		}
+	}
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0069,
+		    "NVRAM configuration failed.\n");
+	}
+	return (rval);
+}
+
+static void
+qla2x00_rport_del(void *data)
+{
+	fc_port_t *fcport = data;
+	struct fc_rport *rport;
+	unsigned long flags;
+
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+	rport = fcport->drport ? fcport->drport : fcport->rport;
+	fcport->drport = NULL;
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+	if (rport) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
+		    "%s %8phN. rport %p roles %x\n",
+		    __func__, fcport->port_name, rport,
+		    rport->roles);
+
+		fc_remote_port_delete(rport);
+	}
+}
+
+/**
+ * qla2x00_alloc_fcport() - Allocate a generic fcport.
+ * @vha: HA context
+ * @flags: allocation flags
+ *
+ * Returns a pointer to the allocated fcport, or NULL, if none available.
+ */
+fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+{
+	fc_port_t *fcport;
+
+	fcport = kzalloc(sizeof(fc_port_t), flags);
+	if (!fcport)
+		return NULL;
+
+	/* Setup fcport template structure. */
+	fcport->vha = vha;
+	fcport->port_type = FCT_UNKNOWN;
+	fcport->loop_id = FC_NO_LOOP_ID;
+	qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
+	fcport->supported_classes = FC_COS_UNSPECIFIED;
+	fcport->fp_speed = PORT_SPEED_UNKNOWN;
+
+	fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
+		sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
+		flags);
+	fcport->disc_state = DSC_DELETED;
+	fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+	fcport->deleted = QLA_SESS_DELETED;
+	fcport->login_retry = vha->hw->login_retry_count;
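+	/* NOTE: the NVRAM-derived retry count above is immediately
+	 * overridden with a fixed value. */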
+	fcport->login_retry = 5;
+	fcport->logout_on_delete = 1;
+
+	if (!fcport->ct_desc.ct_sns) {
+		ql_log(ql_log_warn, vha, 0xd049,
+		    "Failed to allocate ct_sns request.\n");
+		kfree(fcport);
+		return NULL;
+	}
+	INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+	INIT_LIST_HEAD(&fcport->gnl_entry);
+	INIT_LIST_HEAD(&fcport->list);
+
+	return fcport;
+}
+
+void
+qla2x00_free_fcport(fc_port_t *fcport)
+{
+	if (fcport->ct_desc.ct_sns) {
+		dma_free_coherent(&fcport->vha->hw->pdev->dev,
+			sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
+			fcport->ct_desc.ct_sns_dma);
+
+		fcport->ct_desc.ct_sns = NULL;
+	}
+	kfree(fcport);
+}
+
+/*
+ * qla2x00_configure_loop
+ *      Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ *      vha               = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      1 = error.
+ *      2 = database was full and device was not configured.
+ */
+static int
+qla2x00_configure_loop(scsi_qla_host_t *vha)
+{
+	int  rval;
+	unsigned long flags, save_flags;
+	struct qla_hw_data *ha = vha->hw;
+	rval = QLA_SUCCESS;
+
+	/* Get Initiator ID */
+	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
+		rval = qla2x00_configure_hba(vha);
+		if (rval != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_disc, vha, 0x2013,
+			    "Unable to configure HBA.\n");
+			return (rval);
+		}
+	}
+
+	save_flags = flags = vha->dpc_flags;
+	ql_dbg(ql_dbg_disc, vha, 0x2014,
+	    "Configure loop -- dpc flags = 0x%lx.\n", flags);
+
+	/*
+	 * If we have both an RSCN and PORT UPDATE pending then handle them
+	 * both at the same time.
+	 */
+	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	clear_bit(RSCN_UPDATE, &vha->dpc_flags);
+
+	qla2x00_get_data_rate(vha);
+
+	/* Determine what we need to do */
+	if (ha->current_topology == ISP_CFG_FL &&
+	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
+
+		set_bit(RSCN_UPDATE, &flags);
+
+	} else if (ha->current_topology == ISP_CFG_F &&
+	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
+
+		set_bit(RSCN_UPDATE, &flags);
+		clear_bit(LOCAL_LOOP_UPDATE, &flags);
+
+	} else if (ha->current_topology == ISP_CFG_N) {
+		clear_bit(RSCN_UPDATE, &flags);
+	} else if (ha->current_topology == ISP_CFG_NL) {
+		clear_bit(RSCN_UPDATE, &flags);
+		set_bit(LOCAL_LOOP_UPDATE, &flags);
+	} else if (!vha->flags.online ||
+	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
+		set_bit(RSCN_UPDATE, &flags);
+		set_bit(LOCAL_LOOP_UPDATE, &flags);
+	}
+
+	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+			ql_dbg(ql_dbg_disc, vha, 0x2015,
+			    "Loop resync needed, failing.\n");
+			rval = QLA_FUNCTION_FAILED;
+		} else
+			rval = qla2x00_configure_local_loop(vha);
+	}
+
+	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
+		if (LOOP_TRANSITION(vha)) {
+			ql_dbg(ql_dbg_disc, vha, 0x2099,
+			    "Needs RSCN update and loop transition.\n");
+			rval = QLA_FUNCTION_FAILED;
+		}
+		else
+			rval = qla2x00_configure_fabric(vha);
+	}
+
+	if (rval == QLA_SUCCESS) {
+		if (atomic_read(&vha->loop_down_timer) ||
+		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			atomic_set(&vha->loop_state, LOOP_READY);
+			ql_dbg(ql_dbg_disc, vha, 0x2069,
+			    "LOOP READY.\n");
+			ha->flags.fw_init_done = 1;
+
+			/*
+			 * Process any ATIO queue entries that came in
+			 * while we weren't online.
+			 */
+			if (qla_tgt_mode_enabled(vha) ||
+			    qla_dual_mode_enabled(vha)) {
+				if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+					spin_lock_irqsave(&ha->tgt.atio_lock,
+					    flags);
+					qlt_24xx_process_atio_queue(vha, 0);
+					spin_unlock_irqrestore(
+					    &ha->tgt.atio_lock, flags);
+				} else {
+					spin_lock_irqsave(&ha->hardware_lock,
+					    flags);
+					qlt_24xx_process_atio_queue(vha, 1);
+					spin_unlock_irqrestore(
+					    &ha->hardware_lock, flags);
+				}
+			}
+		}
+	}
+
+	if (rval) {
+		ql_dbg(ql_dbg_disc, vha, 0x206a,
+		    "%s *** FAILED ***.\n", __func__);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x206b,
+		    "%s: exiting normally.\n", __func__);
+	}
+
+	/* Restore state if a resync event occurred during processing */
+	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		if (test_bit(RSCN_UPDATE, &save_flags)) {
+			set_bit(RSCN_UPDATE, &vha->dpc_flags);
+		}
+	}
+
+	return (rval);
+}
+
+
+
+/*
+ * qla2x00_configure_local_loop
+ *	Updates Fibre Channel Device Database with local loop devices.
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *
+ * Returns:
+ *	0 = success.
+ */
+static int
+qla2x00_configure_local_loop(scsi_qla_host_t *vha)
+{
+	int		rval, rval2;
+	int		found_devs;
+	int		found;
+	fc_port_t	*fcport, *new_fcport;
+
+	uint16_t	index;
+	uint16_t	entries;
+	char		*id_iter;
+	uint16_t	loop_id;
+	uint8_t		domain, area, al_pa;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+
+	found_devs = 0;
+	new_fcport = NULL;
+	entries = MAX_FIBRE_DEVICES_LOOP;
+
+	/* Get list of logged in devices. */
+	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
+	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
+	    &entries);
+	if (rval != QLA_SUCCESS)
+		goto cleanup_allocation;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2011,
+	    "Entries in ID list (%d).\n", entries);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+	    (uint8_t *)ha->gid_list,
+	    entries * sizeof(struct gid_list_info));
+
+	/* Allocate temporary fcport for any new fcports discovered. */
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (new_fcport == NULL) {
+		ql_log(ql_log_warn, vha, 0x2012,
+		    "Memory allocation failed for fcport.\n");
+		rval = QLA_MEMORY_ALLOC_FAILED;
+		goto cleanup_allocation;
+	}
+	new_fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+	/*
+	 * Mark local devices that were present with FCF_DEVICE_LOST for now.
+	 */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (atomic_read(&fcport->state) == FCS_ONLINE &&
+		    fcport->port_type != FCT_BROADCAST &&
+		    (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+
+			ql_dbg(ql_dbg_disc, vha, 0x2096,
+			    "Marking port lost loop_id=0x%04x.\n",
+			    fcport->loop_id);
+
+			qla2x00_mark_device_lost(vha, fcport, 0, 0);
+		}
+	}
+
+	/* Add devices to port list. */
+	id_iter = (char *)ha->gid_list;
+	for (index = 0; index < entries; index++) {
+		domain = ((struct gid_list_info *)id_iter)->domain;
+		area = ((struct gid_list_info *)id_iter)->area;
+		al_pa = ((struct gid_list_info *)id_iter)->al_pa;
+		if (IS_QLA2100(ha) || IS_QLA2200(ha))
+			loop_id = (uint16_t)
+			    ((struct gid_list_info *)id_iter)->loop_id_2100;
+		else
+			loop_id = le16_to_cpu(
+			    ((struct gid_list_info *)id_iter)->loop_id);
+		id_iter += ha->gid_list_info_size;
+
+		/* Bypass reserved domain fields. */
+		if ((domain & 0xf0) == 0xf0)
+			continue;
+
+		/* Bypass if not same domain and area of adapter. */
+		if (area && domain &&
+		    (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
+			continue;
+
+		/* Bypass invalid local loop ID. */
+		if (loop_id > LAST_LOCAL_LOOP_ID)
+			continue;
+
+		memset(new_fcport->port_name, 0, WWN_SIZE);
+
+		/* Fill in member data. */
+		new_fcport->d_id.b.domain = domain;
+		new_fcport->d_id.b.area = area;
+		new_fcport->d_id.b.al_pa = al_pa;
+		new_fcport->loop_id = loop_id;
+
+		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
+		if (rval2 != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_disc, vha, 0x2097,
+			    "Failed to retrieve fcport information "
+			    "-- get_port_database=%x, loop_id=0x%04x.\n",
+			    rval2, new_fcport->loop_id);
+			ql_dbg(ql_dbg_disc, vha, 0x2105,
+			    "Scheduling resync.\n");
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			continue;
+		}
+
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+		/* Check for matching device in port list. */
+		found = 0;
+		fcport = NULL;
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (memcmp(new_fcport->port_name, fcport->port_name,
+			    WWN_SIZE))
+				continue;
+
+			fcport->flags &= ~FCF_FABRIC_DEVICE;
+			fcport->loop_id = new_fcport->loop_id;
+			fcport->port_type = new_fcport->port_type;
+			fcport->d_id.b24 = new_fcport->d_id.b24;
+			memcpy(fcport->node_name, new_fcport->node_name,
+			    WWN_SIZE);
+
+			if (!fcport->login_succ) {
+				vha->fcport_count++;
+				fcport->login_succ = 1;
+				fcport->disc_state = DSC_LOGIN_COMPLETE;
+			}
+
+			found++;
+			break;
+		}
+
+		if (!found) {
+			/* New device, add to fcports list. */
+			list_add_tail(&new_fcport->list, &vha->vp_fcports);
+
+			/* Allocate a new replacement fcport. */
+			fcport = new_fcport;
+			if (!fcport->login_succ) {
+				vha->fcport_count++;
+				fcport->login_succ = 1;
+				fcport->disc_state = DSC_LOGIN_COMPLETE;
+			}
+
+			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+
+			if (new_fcport == NULL) {
+				ql_log(ql_log_warn, vha, 0xd031,
+				    "Failed to allocate memory for fcport.\n");
+				rval = QLA_MEMORY_ALLOC_FAILED;
+				goto cleanup_allocation;
+			}
+			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
+		}
+
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+		/* Base iIDMA settings on HBA port speed. */
+		fcport->fp_speed = ha->link_data_rate;
+
+		qla2x00_update_fcport(vha, fcport);
+
+		found_devs++;
+	}
+
+cleanup_allocation:
+	kfree(new_fcport);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_disc, vha, 0x2098,
+		    "Configure local loop error exit: rval=%x.\n", rval);
+	}
+
+	return (rval);
+}
+
+static void
+qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int rval;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_IIDMA_CAPABLE(ha))
+		return;
+
+	if (atomic_read(&fcport->state) != FCS_ONLINE)
+		return;
+
+	if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
+	    fcport->fp_speed > ha->link_data_rate ||
+	    !ha->flags.gpsc_supported)
+		return;
+
+	rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
+	    mb);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_disc, vha, 0x2004,
+		    "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
+		    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2005,
+		    "iIDMA adjusted to %s GB/s on %8phN.\n",
+		    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
+		    fcport->port_name);
+	}
+}
+
+/* qla2x00_reg_remote_port() is reserved for initiator mode only. */
+static void
+qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	struct fc_rport_identifiers rport_ids;
+	struct fc_rport *rport;
+	unsigned long flags;
+
+	rport_ids.node_name = wwn_to_u64(fcport->node_name);
+	rport_ids.port_name = wwn_to_u64(fcport->port_name);
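+	/* Pack the 24-bit FC address (domain:area:al_pa) for the
+	 * transport layer. */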
+	rport_ids.port_id = fcport->d_id.b.domain << 16 |
+	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
+	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
+	if (!rport) {
+		ql_log(ql_log_warn, vha, 0x2006,
+		    "Unable to allocate fc remote port.\n");
+		return;
+	}
+
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+	*((fc_port_t **)rport->dd_data) = fcport;
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+
+	rport->supported_classes = fcport->supported_classes;
+
+	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+	if (fcport->port_type == FCT_INITIATOR)
+		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+	if (fcport->port_type == FCT_TARGET)
+		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ee,
+	    "%s %8phN. rport %p is %s mode\n",
+	    __func__, fcport->port_name, rport,
+	    (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+
+	fc_remote_port_rolechg(rport, rport_ids.roles);
+}
+
+/*
+ * qla2x00_update_fcport
+ *	Updates device on list.
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *	fcport = port structure pointer.
+ *
+ * Context:
+ *	Kernel context.
+ */
+void
+qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	fcport->vha = vha;
+
+	if (IS_SW_RESV_ADDR(fcport->d_id))
+		return;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+	    __func__, fcport->port_name);
+
+	if (IS_QLAFX00(vha->hw)) {
+		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+		goto reg_port;
+	}
+	fcport->login_retry = 0;
+	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+	fcport->disc_state = DSC_LOGIN_COMPLETE;
+	fcport->deleted = 0;
+	fcport->logout_on_delete = 1;
+
+	if (fcport->fc4f_nvme) {
+		qla_nvme_register_remote(vha, fcport);
+		return;
+	}
+
+	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+	qla2x00_iidma_fcport(vha, fcport);
+	qla24xx_update_fcport_fcp_prio(vha, fcport);
+
+reg_port:
+	switch (vha->host->active_mode) {
+	case MODE_INITIATOR:
+		qla2x00_reg_remote_port(vha, fcport);
+		break;
+	case MODE_TARGET:
+		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
+			!vha->vha_tgt.qla_tgt->tgt_stopped)
+			qlt_fc_port_added(vha, fcport);
+		break;
+	case MODE_DUAL:
+		qla2x00_reg_remote_port(vha, fcport);
+		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
+			!vha->vha_tgt.qla_tgt->tgt_stopped)
+			qlt_fc_port_added(vha, fcport);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * qla2x00_configure_fabric
+ *      Setup SNS devices with loop ID's.
+ *
+ * Input:
+ *      vha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      BIT_0 = error
+ */
+static int
+qla2x00_configure_fabric(scsi_qla_host_t *vha)
+{
+	int	rval;
+	fc_port_t	*fcport;
+	uint16_t	mb[MAILBOX_REGISTER_COUNT];
+	uint16_t	loop_id;
+	LIST_HEAD(new_fcports);
+	struct qla_hw_data *ha = vha->hw;
+	int		discovery_gen;
+
+	/* If FL port exists, then SNS is present */
+	if (IS_FWI2_CAPABLE(ha))
+		loop_id = NPH_F_PORT;
+	else
+		loop_id = SNS_FL_PORT;
+	rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_disc, vha, 0x20a0,
+		    "MBX_GET_PORT_NAME failed, No FL Port.\n");
+
+		vha->device_flags &= ~SWITCH_FOUND;
+		return (QLA_SUCCESS);
+	}
+	vha->device_flags |= SWITCH_FOUND;
+
+
+	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+		rval = qla2x00_send_change_request(vha, 0x3, 0);
+		if (rval != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0x121,
+				"Failed to enable receiving of RSCN requests: 0x%x.\n",
+				rval);
+	}
+
+
+	do {
+		qla2x00_mgmt_svr_login(vha);
+
+		/* FDMI support. */
+		if (ql2xfdmienable &&
+		    test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+			qla2x00_fdmi_register(vha);
+
+		/* Ensure we are logged into the SNS. */
+		loop_id = NPH_SNS_LID(ha);
+		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
+		    0xfc, mb, BIT_1|BIT_0);
+		if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_disc, vha, 0x20a1,
+			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
+			    loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			return rval;
+		}
+		if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
+			if (qla2x00_rft_id(vha)) {
+				/* EMPTY */
+				ql_dbg(ql_dbg_disc, vha, 0x20a2,
+				    "Register FC-4 TYPE failed.\n");
+				if (test_bit(LOOP_RESYNC_NEEDED,
+				    &vha->dpc_flags))
+					break;
+			}
+			if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
+				/* EMPTY */
+				ql_dbg(ql_dbg_disc, vha, 0x209a,
+				    "Register FC-4 Features failed.\n");
+				if (test_bit(LOOP_RESYNC_NEEDED,
+				    &vha->dpc_flags))
+					break;
+			}
+			if (vha->flags.nvme_enabled) {
+				if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
+					ql_dbg(ql_dbg_disc, vha, 0x2049,
+					    "Register NVME FC Type Features failed.\n");
+				}
+			}
+			if (qla2x00_rnn_id(vha)) {
+				/* EMPTY */
+				ql_dbg(ql_dbg_disc, vha, 0x2104,
+				    "Register Node Name failed.\n");
+				if (test_bit(LOOP_RESYNC_NEEDED,
+				    &vha->dpc_flags))
+					break;
+			} else if (qla2x00_rsnn_nn(vha)) {
+				/* EMPTY */
+				ql_dbg(ql_dbg_disc, vha, 0x209b,
+				    "Register Symbolic Node Name failed.\n");
+				if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+					break;
+			}
+		}
+
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			fcport->scan_state = QLA_FCPORT_SCAN;
+		}
+
+		/* Mark the time right before querying FW for connected ports.
+		 * This process is long, asynchronous and by the time it's done,
+		 * collected information might not be accurate anymore. E.g.
+		 * disconnected port might have re-connected and a brand new
+		 * session has been created. In this case session's generation
+		 * will be newer than discovery_gen. */
+		qlt_do_generation_tick(vha, &discovery_gen);
+
+		rval = qla2x00_find_all_fabric_devs(vha);
+		if (rval != QLA_SUCCESS)
+			break;
+	} while (0);
+
+	if (!vha->nvme_local_port && vha->flags.nvme_enabled)
+		qla_nvme_register_hba(vha);
+
+	if (rval)
+		ql_dbg(ql_dbg_disc, vha, 0x2068,
+		    "Configure fabric error exit rval=%d.\n", rval);
+
+	return (rval);
+}
+
+/*
+ * qla2x00_find_all_fabric_devs
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *
+ * Returns:
+ *	0 = success.
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
+{
+	int		rval;
+	uint16_t	loop_id;
+	fc_port_t	*fcport, *new_fcport;
+	int		found;
+
+	sw_info_t	*swl;
+	int		swl_idx;
+	int		first_dev, last_dev;
+	port_id_t	wrap = {}, nxt_d_id;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	unsigned long flags;
+
+	rval = QLA_SUCCESS;
+
+	/* Try GID_PT to get device list, else GAN. */
+	if (!ha->swl)
+		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
+		    GFP_KERNEL);
+	swl = ha->swl;
+	if (!swl) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x209c,
+		    "GID_PT allocations failed, fallback on GA_NXT.\n");
+	} else {
+		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
+		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
+			swl = NULL;
+			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+				return rval;
+		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
+			swl = NULL;
+			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+				return rval;
+		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
+			swl = NULL;
+			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+				return rval;
+		} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
+			swl = NULL;
+			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+				return rval;
+		}
+
+		/* If other queries succeeded probe for FC-4 type */
+		if (swl) {
+			qla2x00_gff_id(vha, swl);
+			if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+				return rval;
+		}
+	}
+	swl_idx = 0;
+
+	/* Allocate temporary fcport for any new fcports discovered. */
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (new_fcport == NULL) {
+		ql_log(ql_log_warn, vha, 0x209d,
+		    "Failed to allocate memory for fcport.\n");
+		return (QLA_MEMORY_ALLOC_FAILED);
+	}
+	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
+	/* Set start port ID scan at adapter ID. */
+	first_dev = 1;
+	last_dev = 0;
+
+	/* Starting free loop ID. */
+	loop_id = ha->min_external_loopid;
+	for (; loop_id <= ha->max_loop_id; loop_id++) {
+		if (qla2x00_is_reserved_id(vha, loop_id))
+			continue;
+
+		if (ha->current_topology == ISP_CFG_FL &&
+		    (atomic_read(&vha->loop_down_timer) ||
+		     LOOP_TRANSITION(vha))) {
+			atomic_set(&vha->loop_down_timer, 0);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+			break;
+		}
+
+		if (swl != NULL) {
+			if (last_dev) {
+				wrap.b24 = new_fcport->d_id.b24;
+			} else {
+				new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
+				memcpy(new_fcport->node_name,
+				    swl[swl_idx].node_name, WWN_SIZE);
+				memcpy(new_fcport->port_name,
+				    swl[swl_idx].port_name, WWN_SIZE);
+				memcpy(new_fcport->fabric_port_name,
+				    swl[swl_idx].fabric_port_name, WWN_SIZE);
+				new_fcport->fp_speed = swl[swl_idx].fp_speed;
+				new_fcport->fc4_type = swl[swl_idx].fc4_type;
+
+				new_fcport->nvme_flag = 0;
+				new_fcport->fc4f_nvme = 0;
+				if (vha->flags.nvme_enabled &&
+				    swl[swl_idx].fc4f_nvme) {
+					new_fcport->fc4f_nvme =
+					    swl[swl_idx].fc4f_nvme;
+					ql_log(ql_log_info, vha, 0x2131,
+					    "FOUND: NVME port %8phC as FC Type 28h\n",
+					    new_fcport->port_name);
+				}
+
+				if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
+					last_dev = 1;
+				}
+				swl_idx++;
+			}
+		} else {
+			/* Send GA_NXT to the switch */
+			rval = qla2x00_ga_nxt(vha, new_fcport);
+			if (rval != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0x209e,
+				    "SNS scan failed -- assuming "
+				    "zero-entry result.\n");
+				rval = QLA_SUCCESS;
+				break;
+			}
+		}
+
+		/* If wrap on switch device list, exit. */
+		if (first_dev) {
+			wrap.b24 = new_fcport->d_id.b24;
+			first_dev = 0;
+		} else if (new_fcport->d_id.b24 == wrap.b24) {
+			ql_dbg(ql_dbg_disc, vha, 0x209f,
+			    "Device wrap (%02x%02x%02x).\n",
+			    new_fcport->d_id.b.domain,
+			    new_fcport->d_id.b.area,
+			    new_fcport->d_id.b.al_pa);
+			break;
+		}
+
+		/* Bypass if same physical adapter. */
+		if (new_fcport->d_id.b24 == base_vha->d_id.b24)
+			continue;
+
+		/* Bypass virtual ports of the same host. */
+		if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+			continue;
+
+		/* Bypass if same domain and area of adapter. */
+		if (((new_fcport->d_id.b24 & 0xffff00) ==
+		    (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
+			ISP_CFG_FL)
+			    continue;
+
+		/* Bypass reserved domain fields. */
+		if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
+			continue;
+
+		/* Bypass ports whose FC-4 type is neither FCP_SCSI nor unknown. */
+		if (ql2xgffidenable &&
+		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
+			continue;
+
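+		/*
+		 * The candidate passed every bypass filter; hold the target
+		 * session lock while matching it against the fcport list.
+		 */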
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+
+		/* Locate matching device in database. */
+		found = 0;
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (memcmp(new_fcport->port_name, fcport->port_name,
+			    WWN_SIZE))
+				continue;
+
+			fcport->scan_state = QLA_FCPORT_FOUND;
+
+			found++;
+
+			/* Update port state. */
+			memcpy(fcport->fabric_port_name,
+			    new_fcport->fabric_port_name, WWN_SIZE);
+			fcport->fp_speed = new_fcport->fp_speed;
+
+			/*
+			 * If address the same and state FCS_ONLINE
+			 * (or in target mode), nothing changed.
+			 */
+			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
+			    (atomic_read(&fcport->state) == FCS_ONLINE ||
+			     (vha->host->active_mode == MODE_TARGET))) {
+				break;
+			}
+
+			/*
+			 * Device was not a fabric device before; adopt the
+			 * new port ID and force a fresh fabric login.
+			 */
+			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+				fcport->d_id.b24 = new_fcport->d_id.b24;
+				qla2x00_clear_loop_id(fcport);
+				fcport->flags |= (FCF_FABRIC_DEVICE |
+				    FCF_LOGIN_NEEDED);
+				break;
+			}
+
+			/*
+			 * Port ID changed or device was marked to be updated;
+			 * Log it out if still logged in and mark it for
+			 * relogin later.
+			 */
+			if (qla_tgt_mode_enabled(base_vha)) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
+					 "port changed FC ID, %8phC"
+					 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
+					 fcport->port_name,
+					 fcport->d_id.b.domain,
+					 fcport->d_id.b.area,
+					 fcport->d_id.b.al_pa,
+					 fcport->loop_id,
+					 new_fcport->d_id.b.domain,
+					 new_fcport->d_id.b.area,
+					 new_fcport->d_id.b.al_pa);
+				fcport->d_id.b24 = new_fcport->d_id.b24;
+				break;
+			}
+
+			fcport->d_id.b24 = new_fcport->d_id.b24;
+			fcport->flags |= FCF_LOGIN_NEEDED;
+			break;
+		}
+
+		if (found) {
+			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+			continue;
+		}
+		/* If device was not in our fcports list, then add it. */
+		new_fcport->scan_state = QLA_FCPORT_FOUND;
+		list_add_tail(&new_fcport->list, &vha->vp_fcports);
+
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+		/* Allocate a new replacement fcport. */
+		nxt_d_id.b24 = new_fcport->d_id.b24;
+		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (new_fcport == NULL) {
+			ql_log(ql_log_warn, vha, 0xd032,
+			    "Memory allocation failed for fcport.\n");
+			return (QLA_MEMORY_ALLOC_FAILED);
+		}
+		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
+		new_fcport->d_id.b24 = nxt_d_id.b24;
+	}
+
+	qla2x00_free_fcport(new_fcport);
+
+	/*
+	 * Logout all previous fabric dev marked lost, except FCP2 devices.
+	 */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
+
+		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+			continue;
+
+		if (fcport->scan_state == QLA_FCPORT_SCAN) {
+			if ((qla_dual_mode_enabled(vha) ||
+			    qla_ini_mode_enabled(vha)) &&
+			    atomic_read(&fcport->state) == FCS_ONLINE) {
+				qla2x00_mark_device_lost(vha, fcport,
+					ql2xplogiabsentdevice, 0);
+				if (fcport->loop_id != FC_NO_LOOP_ID &&
+				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+				    fcport->port_type != FCT_INITIATOR &&
+				    fcport->port_type != FCT_BROADCAST) {
+					ql_dbg(ql_dbg_disc, vha, 0x20f0,
+					    "%s %d %8phC post del sess\n",
+					    __func__, __LINE__,
+					    fcport->port_name);
+
+					qlt_schedule_sess_for_deletion_lock(
+					    fcport);
+					continue;
+				}
+			}
+		}
+
+		if (fcport->scan_state == QLA_FCPORT_FOUND &&
+		    (fcport->flags & FCF_LOGIN_NEEDED) != 0)
+			qla24xx_fcport_handle_login(vha, fcport);
+	}
+	return (rval);
+}
+
+/*
+ * qla2x00_find_new_loop_id
+ *	Scan through our port list and find a new usable loop ID.
+ *
+ * Input:
+ *	ha:	adapter state pointer.
+ *	dev:	port structure pointer.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
+{
+	int	rval;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
+
+	rval = QLA_SUCCESS;
+
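+	/* loop_id_map is a bitmap of in-use loop IDs, guarded by vport_slock. */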
+	spin_lock_irqsave(&ha->vport_slock, flags);
+
+	dev->loop_id = find_first_zero_bit(ha->loop_id_map,
+	    LOOPID_MAP_SIZE);
+	if (dev->loop_id >= LOOPID_MAP_SIZE ||
+	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
+		dev->loop_id = FC_NO_LOOP_ID;
+		rval = QLA_FUNCTION_FAILED;
+	} else
+		set_bit(dev->loop_id, ha->loop_id_map);
+
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	if (rval == QLA_SUCCESS)
+		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+		    "Assigning new loopid=%x, portid=%x.\n",
+		    dev->loop_id, dev->d_id.b24);
+	else
+		ql_log(ql_log_warn, dev->vha, 0x2087,
+		    "No loop_id's available, portid=%x.\n",
+		    dev->d_id.b24);
+
+	return (rval);
+}
+
+/*
+ * qla2x00_fabric_login
+ *	Issue fabric login command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	device = pointer to FC device type structure.
+ *
+ * Returns:
+ *      0 - Login successfully
+ *      1 - Login failed
+ *      2 - Initiator device
+ *      3 - Fatal error
+ */
+int
+qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
+    uint16_t *next_loopid)
+{
+	int	rval;
+	int	retry;
+	uint16_t tmp_loopid;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
+
+	retry = 0;
+	tmp_loopid = 0;
+
+	for (;;) {
+		ql_dbg(ql_dbg_disc, vha, 0x2000,
+		    "Trying Fabric Login w/loop id 0x%04x for port "
+		    "%02x%02x%02x.\n",
+		    fcport->loop_id, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+		/* Login fcport on switch. */
+		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa, mb, BIT_0);
+		if (rval != QLA_SUCCESS) {
+			return rval;
+		}
+		if (mb[0] == MBS_PORT_ID_USED) {
+			/*
+			 * Device has another loop ID.  The firmware team
+			 * recommends the driver perform an implicit login with
+			 * the specified ID again. The ID we just used is saved
+			 * here so we return with an ID that can be tried by
+			 * the next login.
+			 */
+			retry++;
+			tmp_loopid = fcport->loop_id;
+			fcport->loop_id = mb[1];
+
+			ql_dbg(ql_dbg_disc, vha, 0x2001,
+			    "Fabric Login: port in use - next loop "
+			    "id=0x%04x, port id= %02x%02x%02x.\n",
+			    fcport->loop_id, fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
+			/*
+			 * Login succeeded.
+			 */
+			if (retry) {
+				/* A retry occurred before. */
+				*next_loopid = tmp_loopid;
+			} else {
+				/*
+				 * No retry occurred before. Just increment the
+				 * ID value for next login.
+				 */
+				*next_loopid = (fcport->loop_id + 1);
+			}
+
+			if (mb[1] & BIT_0) {
+				fcport->port_type = FCT_INITIATOR;
+			} else {
+				fcport->port_type = FCT_TARGET;
+				if (mb[1] & BIT_1) {
+					fcport->flags |= FCF_FCP2_DEVICE;
+				}
+			}
+
+			if (mb[10] & BIT_0)
+				fcport->supported_classes |= FC_COS_CLASS2;
+			if (mb[10] & BIT_1)
+				fcport->supported_classes |= FC_COS_CLASS3;
+
+			if (IS_FWI2_CAPABLE(ha)) {
+				if (mb[10] & BIT_7)
+					fcport->flags |=
+					    FCF_CONF_COMP_SUPPORTED;
+			}
+
+			rval = QLA_SUCCESS;
+			break;
+		} else if (mb[0] == MBS_LOOP_ID_USED) {
+			/*
+			 * Loop ID already used, try next loop ID.
+			 */
+			fcport->loop_id++;
+			rval = qla2x00_find_new_loop_id(vha, fcport);
+			if (rval != QLA_SUCCESS) {
+				/* Ran out of loop IDs to use */
+				break;
+			}
+		} else if (mb[0] == MBS_COMMAND_ERROR) {
+			/*
+			 * Firmware possibly timed out during login. If no
+			 * retries are left, the device is declared dead.
+			 */
+			*next_loopid = fcport->loop_id;
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
+			    fcport->d_id.b.domain, fcport->d_id.b.area,
+			    fcport->d_id.b.al_pa);
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
+
+			rval = 1;
+			break;
+		} else {
+			/*
+			 * Unrecoverable / unhandled error.
+			 */
+			ql_dbg(ql_dbg_disc, vha, 0x2002,
+			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
+			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+			    fcport->loop_id, jiffies);
+
+			*next_loopid = fcport->loop_id;
+			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
+			    fcport->d_id.b.domain, fcport->d_id.b.area,
+			    fcport->d_id.b.al_pa);
+			qla2x00_clear_loop_id(fcport);
+			fcport->login_retry = 0;
+
+			rval = 3;
+			break;
+		}
+	}
+
+	return (rval);
+}
+
+/*
+ * qla2x00_local_device_login
+ *	Issue local device login command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	loop_id = loop id of device to login to.
+ *
+ * Returns (note: no #define exists for these values):
+ *      0 - Login successfully
+ *      1 - Login failed
+ *      3 - Fatal error
+ */
+int
+qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int		rval;
+	uint16_t	mb[MAILBOX_REGISTER_COUNT];
+
+	memset(mb, 0, sizeof(mb));
+	rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
+	if (rval == QLA_SUCCESS) {
+		/* Interrogate mailbox registers for any errors */
+		if (mb[0] == MBS_COMMAND_ERROR)
+			rval = 1;
+		else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
+			/* device not in PCB table */
+			rval = 3;
+	}
+
+	return (rval);
+}
+
+/*
+ *  qla2x00_loop_resync
+ *      Resync with fibre channel devices.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qla2x00_loop_resync(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t wait_time;
+	struct req_que *req;
+	struct rsp_que *rsp;
+
+	req = vha->req;
+	rsp = req->rsp;
+
+	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+	if (vha->flags.online) {
+		if (!(rval = qla2x00_fw_ready(vha))) {
+			/* Wait at most MAX_TARGET RSCNs for a stable link. */
+			wait_time = 256;
+			do {
+				if (!IS_QLAFX00(vha->hw)) {
+					/*
+					 * Issue a marker after FW becomes
+					 * ready.
+					 */
+					qla2x00_marker(vha, req, rsp, 0, 0,
+						MK_SYNC_ALL);
+					vha->marker_needed = 0;
+				}
+
+				/* Remap devices on Loop. */
+				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+
+				if (IS_QLAFX00(vha->hw))
+					qlafx00_configure_devices(vha);
+				else
+					qla2x00_configure_loop(vha);
+
+				wait_time--;
+			} while (!atomic_read(&vha->loop_down_timer) &&
+				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+				&vha->dpc_flags)));
+		}
+	}
+
+	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+		return (QLA_FUNCTION_FAILED);
+
+	if (rval)
+		ql_dbg(ql_dbg_disc, vha, 0x206c,
+		    "%s *** FAILED ***.\n", __func__);
+
+	return (rval);
+}
+
+/*
+ * qla2x00_perform_loop_resync
+ * Description: This function will set the appropriate flags and call
+ *              qla2x00_loop_resync. If successful, the loop will be resynced.
+ * Arguments : scsi_qla_host_t pointer
+ * return    : Success or Failure
+ */
+
+int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
+{
+	int32_t rval = 0;
+
+	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
+		/*Configure the flags so that resync happens properly*/
+		atomic_set(&ha->loop_down_timer, 0);
+		if (!(ha->device_flags & DFLG_NO_CABLE)) {
+			atomic_set(&ha->loop_state, LOOP_UP);
+			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+
+			rval = qla2x00_loop_resync(ha);
+		} else
+			atomic_set(&ha->loop_state, LOOP_DEAD);
+
+		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
+	}
+
+	return rval;
+}
+
+void
+qla2x00_update_fcports(scsi_qla_host_t *base_vha)
+{
+	fc_port_t *fcport;
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha = base_vha->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	/* Go with deferred removal of rport references. */
+	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
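+		/*
+		 * Take a vref so the vport cannot be torn down while the
+		 * lock is dropped around qla2x00_rport_del() below.
+		 */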
+		atomic_inc(&vha->vref_count);
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->drport &&
+			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+				spin_unlock_irqrestore(&ha->vport_slock, flags);
+				qla2x00_rport_del(fcport);
+
+				spin_lock_irqsave(&ha->vport_slock, flags);
+			}
+		}
+		atomic_dec(&vha->vref_count);
+		wake_up(&vha->vref_waitq);
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+/* Assumes idc_lock always held on entry */
+void
+qla83xx_reset_ownership(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t drv_presence, drv_presence_mask;
+	uint32_t dev_part_info1, dev_part_info2, class_type;
+	uint32_t class_type_mask = 0x3;
+	uint16_t fcoe_other_function = 0xffff, i;
+
+	if (IS_QLA8044(ha)) {
+		drv_presence = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_ACTIVE_INDEX);
+		dev_part_info1 = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DEV_PART_INFO_INDEX);
+		dev_part_info2 = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DEV_PART_INFO2);
+	} else {
+		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+		qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+	}
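+	/*
+	 * DEV_PART_INFO packs one 4-bit class-type nibble per function:
+	 * register 1 covers functions 0-7, register 2 covers functions 8-15.
+	 */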
+	for (i = 0; i < 8; i++) {
+		class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
+		if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+		    (i != ha->portnum)) {
+			fcoe_other_function = i;
+			break;
+		}
+	}
+	if (fcoe_other_function == 0xffff) {
+		for (i = 0; i < 8; i++) {
+			class_type = ((dev_part_info2 >> (i * 4)) &
+			    class_type_mask);
+			if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
+			    ((i + 8) != ha->portnum)) {
+				fcoe_other_function = i + 8;
+				break;
+			}
+		}
+	}
+	/*
+	 * Prepare drv-presence mask based on fcoe functions present.
+	 * However, consider only valid physical fcoe function numbers (0-15).
+	 */
+	drv_presence_mask = ~((1 << (ha->portnum)) |
+			((fcoe_other_function == 0xffff) ?
+			 0 : (1 << (fcoe_other_function))));
+
+	/* We are the reset owner iff:
+	 *    - No other protocol drivers are present.
+	 *    - This is the lowest-numbered fcoe function.
+	 */
+	if (!(drv_presence & drv_presence_mask) &&
+			(ha->portnum < fcoe_other_function)) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb07f,
+		    "This host is Reset owner.\n");
+		ha->flags.nic_core_reset_owner = 1;
+	}
+}
+
+static int
+__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t drv_ack;
+
+	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+	if (rval == QLA_SUCCESS) {
+		drv_ack |= (1 << ha->portnum);
+		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+	}
+
+	return rval;
+}
+
+static int
+__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t drv_ack;
+
+	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+	if (rval == QLA_SUCCESS) {
+		drv_ack &= ~(1 << ha->portnum);
+		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
+	}
+
+	return rval;
+}
+
+static const char *
+qla83xx_dev_state_to_string(uint32_t dev_state)
+{
+	switch (dev_state) {
+	case QLA8XXX_DEV_COLD:
+		return "COLD/RE-INIT";
+	case QLA8XXX_DEV_INITIALIZING:
+		return "INITIALIZING";
+	case QLA8XXX_DEV_READY:
+		return "READY";
+	case QLA8XXX_DEV_NEED_RESET:
+		return "NEED RESET";
+	case QLA8XXX_DEV_NEED_QUIESCENT:
+		return "NEED QUIESCENT";
+	case QLA8XXX_DEV_FAILED:
+		return "FAILED";
+	case QLA8XXX_DEV_QUIESCENT:
+		return "QUIESCENT";
+	default:
+		return "Unknown";
+	}
+}
+
+/* Assumes idc-lock always held on entry */
+void
+qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t idc_audit_reg = 0, duration_secs = 0;
+
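+	/*
+	 * IDC audit register layout, per the encodings below: bits [6:0] =
+	 * function number, bit 7 = audit type, bits [31:8] = timestamp or
+	 * duration in seconds.
+	 */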
+	switch (audit_type) {
+	case IDC_AUDIT_TIMESTAMP:
+		ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
+		idc_audit_reg = (ha->portnum) |
+		    (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
+		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+		break;
+
+	case IDC_AUDIT_COMPLETION:
+		duration_secs = ((jiffies_to_msecs(jiffies) -
+		    jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
+		idc_audit_reg = (ha->portnum) |
+		    (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
+		qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
+		break;
+
+	default:
+		ql_log(ql_log_warn, vha, 0xb078,
+		    "Invalid audit type specified.\n");
+		break;
+	}
+}
+
+/* Assumes idc_lock always held on entry */
+static int
+qla83xx_initiating_reset(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t  idc_control, dev_state;
+
+	__qla83xx_get_idc_control(vha, &idc_control);
+	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
+		ql_log(ql_log_info, vha, 0xb080,
+		    "NIC Core reset has been disabled. idc-control=0x%x\n",
+		    idc_control);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Set NEED-RESET iff in READY state and we are the reset-owner */
+	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
+		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
+		    QLA8XXX_DEV_NEED_RESET);
+		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
+		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
+	} else {
+		const char *state = qla83xx_dev_state_to_string(dev_state);
+		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+
+		/* SV: XXX: Is timeout required here? */
+		/* Wait for IDC state change READY -> NEED_RESET */
+		while (dev_state == QLA8XXX_DEV_READY) {
+			qla83xx_idc_unlock(vha, 0);
+			msleep(200);
+			qla83xx_idc_lock(vha, 0);
+			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+		}
+	}
+
+	/* Send IDC ack by writing to drv-ack register */
+	__qla83xx_set_drv_ack(vha);
+
+	return QLA_SUCCESS;
+}
+
+int
+__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
+{
+	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+int
+__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
+{
+	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
+}
+
+static int
+qla83xx_check_driver_presence(scsi_qla_host_t *vha)
+{
+	uint32_t drv_presence = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+	if (drv_presence & (1 << ha->portnum))
+		return QLA_SUCCESS;
+	else
+		return QLA_TEST_FAILED;
+}
+
+int
+qla83xx_nic_core_reset(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb058,
+	    "Entered  %s().\n", __func__);
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_log(ql_log_warn, vha, 0xb059,
+		    "Device in unrecoverable FAILED state.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	qla83xx_idc_lock(vha, 0);
+
+	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xb05a,
+		    "Function=0x%x has been removed from IDC participation.\n",
+		    ha->portnum);
+		rval = QLA_FUNCTION_FAILED;
+		goto exit;
+	}
+
+	qla83xx_reset_ownership(vha);
+
+	rval = qla83xx_initiating_reset(vha);
+
+	/*
+	 * Perform reset if we are the reset-owner,
+	 * else wait till IDC state changes to READY/FAILED.
+	 */
+	if (rval == QLA_SUCCESS) {
+		rval = qla83xx_idc_state_handler(vha);
+
+		if (rval == QLA_SUCCESS)
+			ha->flags.nic_core_hung = 0;
+		__qla83xx_clear_drv_ack(vha);
+	}
+
+exit:
+	qla83xx_idc_unlock(vha, 0);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
+
+	return rval;
+}
+
+int
+qla2xxx_mctp_dump(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_FUNCTION_FAILED;
+
+	if (!IS_MCTP_CAPABLE(ha)) {
+		/* This message can be removed from the final version */
+		ql_log(ql_log_info, vha, 0x506d,
+		    "This board is not MCTP capable\n");
+		return rval;
+	}
+
+	if (!ha->mctp_dump) {
+		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
+		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
+
+		if (!ha->mctp_dump) {
+			ql_log(ql_log_warn, vha, 0x506e,
+			    "Failed to allocate memory for mctp dump\n");
+			return rval;
+		}
+	}
+
+#define MCTP_DUMP_STR_ADDR	0x00000000
+	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
+	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x506f,
+		    "Failed to capture mctp dump\n");
+	} else {
+		ql_log(ql_log_info, vha, 0x5070,
+		    "Mctp dump capture for host (%ld/%p).\n",
+		    vha->host_no, ha->mctp_dump);
+		ha->mctp_dumped = 1;
+	}
+
+	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
+		ha->flags.nic_core_reset_hdlr_active = 1;
+		rval = qla83xx_restart_nic_firmware(vha);
+		if (rval)
+			/* NIC Core reset failed. */
+			ql_log(ql_log_warn, vha, 0x5071,
+			    "Failed to restart nic firmware\n");
+		else
+			ql_dbg(ql_dbg_p3p, vha, 0xb084,
+			    "Restarted NIC firmware successfully.\n");
+		ha->flags.nic_core_reset_hdlr_active = 0;
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_quiesce_io
+ * Description: This function will block new I/Os.
+ *              It does not abort any I/Os, as the context
+ *              is not destroyed during quiescence.
+ * Arguments: scsi_qla_host_t
+ * return   : void
+ */
+void
+qla2x00_quiesce_io(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
+
+	ql_dbg(ql_dbg_dpc, vha, 0x401d,
+	    "Quiescing I/O - ha=%p.\n", ha);
+
+	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		qla2x00_mark_all_devices_lost(vha, 0);
+		list_for_each_entry(vp, &ha->vp_list, list)
+			qla2x00_mark_all_devices_lost(vp, 0);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer,
+					LOOP_DOWN_TIME);
+	}
+	/* Wait for pending cmds to complete */
+	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
+}
+
+void
+qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
+	unsigned long flags;
+	fc_port_t *fcport;
+	u16 i;
+
+	/* For ISP82XX, the driver waits for completion of the commands,
+	 * so the online flag should remain set.
+	 */
+	if (!(IS_P3P_TYPE(ha)))
+		vha->flags.online = 0;
+	ha->flags.chip_reset_done = 0;
+	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	vha->qla_stats.total_isp_aborts++;
+
+	ql_log(ql_log_info, vha, 0x00af,
+	    "Performing ISP error recovery - ha=%p.\n", ha);
+
+	/* For ISP82XX, reset_chip only disables interrupts.
+	 * The driver waits for the completion of the commands,
+	 * so the interrupts need to remain enabled.
+	 */
+	if (!(IS_P3P_TYPE(ha)))
+		ha->isp_ops->reset_chip(vha);
+
+	ha->flags.n2n_ae = 0;
+	ha->flags.lip_ae = 0;
+	ha->current_topology = 0;
+	ha->flags.fw_started = 0;
+	ha->flags.fw_init_done = 0;
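+	/* Advance the reset-generation counter on every active queue pair. */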
+	ha->base_qpair->chip_reset++;
+	for (i = 0; i < ha->max_qpairs; i++) {
+		if (ha->queue_pair_map[i])
+			ha->queue_pair_map[i]->chip_reset =
+				ha->base_qpair->chip_reset;
+	}
+
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		qla2x00_mark_all_devices_lost(vha, 0);
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			atomic_inc(&vp->vref_count);
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+			qla2x00_mark_all_devices_lost(vp, 0);
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			atomic_dec(&vp->vref_count);
+		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer,
+			    LOOP_DOWN_TIME);
+	}
+
+	/* Clear all async request states across all VPs. */
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		atomic_inc(&vp->vref_count);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+		list_for_each_entry(fcport, &vp->vp_fcports, list)
+			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		atomic_dec(&vp->vref_count);
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	if (!ha->flags.eeh_busy) {
+		/* Make sure for ISP 82XX IO DMA is complete */
+		if (IS_P3P_TYPE(ha)) {
+			qla82xx_chip_reset_cleanup(vha);
+			ql_log(ql_log_info, vha, 0x00b4,
+			    "Done chip reset cleanup.\n");
+
+			/* Done waiting for pending commands.
+			 * Reset the online flag.
+			 */
+			vha->flags.online = 0;
+		}
+
+		/* Requeue all commands in outstanding command list. */
+		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+	}
+	/* memory barrier */
+	wmb();
+}
+
+/*
+*  qla2x00_abort_isp
+*      Resets ISP and aborts all outstanding commands.
+*
+* Input:
+*      ha           = adapter block pointer.
+*
+* Returns:
+*      0 = success
+*/
+int
+qla2x00_abort_isp(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint8_t        status = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
+	struct req_que *req = ha->req_q_map[0];
+	unsigned long flags;
+
+	if (vha->flags.online) {
+		qla2x00_abort_isp_cleanup(vha);
+
+		if (IS_QLA8031(ha)) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb05c,
+			    "Clearing fcoe driver presence.\n");
+			if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
+				ql_dbg(ql_dbg_p3p, vha, 0xb073,
+				    "Error while clearing DRV-Presence.\n");
+		}
+
+		if (unlikely(pci_channel_offline(ha->pdev) &&
+		    ha->flags.pci_channel_io_perm_failure)) {
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+			status = 0;
+			return status;
+		}
+
+		ha->isp_ops->get_flash_version(vha, req->ring);
+
+		ha->isp_ops->nvram_config(vha);
+
+		if (!qla2x00_restart_isp(vha)) {
+			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+			if (!atomic_read(&vha->loop_down_timer)) {
+				/*
+				 * Issue marker command only when we are going
+				 * to start the I/O.
+				 */
+				vha->marker_needed = 1;
+			}
+
+			vha->flags.online = 1;
+
+			ha->isp_ops->enable_intrs(ha);
+
+			ha->isp_abort_cnt = 0;
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+				qla2x00_get_fw_version(vha);
+			if (ha->fce) {
+				ha->flags.fce_enabled = 1;
+				memset(ha->fce, 0,
+				    fce_calc_size(ha->fce_bufs));
+				rval = qla2x00_enable_fce_trace(vha,
+				    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+				    &ha->fce_bufs);
+				if (rval) {
+					ql_log(ql_log_warn, vha, 0x8033,
+					    "Unable to reinitialize FCE "
+					    "(%d).\n", rval);
+					ha->flags.fce_enabled = 0;
+				}
+			}
+
+			if (ha->eft) {
+				memset(ha->eft, 0, EFT_SIZE);
+				rval = qla2x00_enable_eft_trace(vha,
+				    ha->eft_dma, EFT_NUM_BUFFERS);
+				if (rval) {
+					ql_log(ql_log_warn, vha, 0x8034,
+					    "Unable to reinitialize EFT "
+					    "(%d).\n", rval);
+				}
+			}
+		} else {	/* failed the ISP abort */
+			vha->flags.online = 1;
+			if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+				if (ha->isp_abort_cnt == 0) {
+					ql_log(ql_log_fatal, vha, 0x8035,
+					    "ISP error recover failed - "
+					    "board disabled.\n");
+					/*
+					 * The next call disables the board
+					 * completely.
+					 */
+					qla2x00_abort_isp_cleanup(vha);
+					vha->flags.online = 0;
+					clear_bit(ISP_ABORT_RETRY,
+					    &vha->dpc_flags);
+					status = 0;
+				} else { /* schedule another ISP abort */
+					ha->isp_abort_cnt--;
+					ql_dbg(ql_dbg_taskm, vha, 0x8020,
+					    "ISP abort - retry remaining %d.\n",
+					    ha->isp_abort_cnt);
+					status = 1;
+				}
+			} else {
+				ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+				ql_dbg(ql_dbg_taskm, vha, 0x8021,
+				    "ISP error recovery - retrying (%d) "
+				    "more times.\n", ha->isp_abort_cnt);
+				set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+				status = 1;
+			}
+		}
+
+	}
+
+	if (!status) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
+		qla2x00_configure_hba(vha);
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			if (vp->vp_idx) {
+				atomic_inc(&vp->vref_count);
+				spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+				qla2x00_vp_abort_isp(vp);
+
+				spin_lock_irqsave(&ha->vport_slock, flags);
+				atomic_dec(&vp->vref_count);
+			}
+		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+		if (IS_QLA8031(ha)) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb05d,
+			    "Setting back fcoe driver presence.\n");
+			if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
+				ql_dbg(ql_dbg_p3p, vha, 0xb074,
+				    "Error while setting DRV-Presence.\n");
+		}
+	} else {
+		ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+		       __func__);
+	}
+
+	return(status);
+}
+
+/*
+*  qla2x00_restart_isp
+*      restarts the ISP after a reset
+*
+* Input:
+*      ha = adapter block pointer.
+*
+* Returns:
+*      0 = success
+*/
+static int
+qla2x00_restart_isp(scsi_qla_host_t *vha)
+{
+	int status = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	/* If firmware needs to be loaded */
+	if (qla2x00_isp_firmware(vha)) {
+		vha->flags.online = 0;
+		status = ha->isp_ops->chip_diag(vha);
+		if (!status)
+			status = qla2x00_setup_chip(vha);
+	}
+
+	if (!status && !(status = qla2x00_init_rings(vha))) {
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		ha->flags.chip_reset_done = 1;
+
+		/* Initialize the queues in use */
+		qla25xx_init_queues(ha);
+
+		status = qla2x00_fw_ready(vha);
+		if (!status) {
+			/* Issue a marker after FW becomes ready. */
+			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		}
+
+		/* If there is no cable then assume success. */
+		if ((vha->device_flags & DFLG_NO_CABLE))
+			status = 0;
+	}
+	return (status);
+}
+
+static int
+qla25xx_init_queues(struct qla_hw_data *ha)
+{
+	struct rsp_que *rsp = NULL;
+	struct req_que *req = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	int ret = -1;
+	int i;
+
+	for (i = 1; i < ha->max_rsp_queues; i++) {
+		rsp = ha->rsp_q_map[i];
+		if (rsp && test_bit(i, ha->rsp_qid_map)) {
+			rsp->options &= ~BIT_0;
+			ret = qla25xx_init_rsp_que(base_vha, rsp);
+			if (ret != QLA_SUCCESS)
+				ql_dbg(ql_dbg_init, base_vha, 0x00ff,
+				    "%s Rsp que: %d init failed.\n",
+				    __func__, rsp->id);
+			else
+				ql_dbg(ql_dbg_init, base_vha, 0x0100,
+				    "%s Rsp que: %d inited.\n",
+				    __func__, rsp->id);
+		}
+	}
+	for (i = 1; i < ha->max_req_queues; i++) {
+		req = ha->req_q_map[i];
+		if (req && test_bit(i, ha->req_qid_map)) {
+			/* Clear outstanding commands array. */
+			req->options &= ~BIT_0;
+			ret = qla25xx_init_req_que(base_vha, req);
+			if (ret != QLA_SUCCESS)
+				ql_dbg(ql_dbg_init, base_vha, 0x0101,
+				    "%s Req que: %d init failed.\n",
+				    __func__, req->id);
+			else
+				ql_dbg(ql_dbg_init, base_vha, 0x0102,
+				    "%s Req que: %d inited.\n",
+				    __func__, req->id);
+		}
+	}
+	return ret;
+}
+
+/*
+* qla2x00_reset_adapter
+*      Reset adapter.
+*
+* Input:
+*      ha = adapter block pointer.
+*/
+void
+qla2x00_reset_adapter(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	vha->flags.online = 0;
+	ha->isp_ops->disable_intrs(ha);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
+	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla24xx_reset_adapter(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_P3P_TYPE(ha))
+		return;
+
+	vha->flags.online = 0;
+	ha->isp_ops->disable_intrs(ha);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
+	RD_REG_DWORD(&reg->hccr);
+	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+	RD_REG_DWORD(&reg->hccr);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (IS_NOPOLLING_TYPE(ha))
+		ha->isp_ops->enable_intrs(ha);
+}
+
+/* On sparc systems, obtain port and node WWN from firmware
+ * properties.
+ */
+static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
+	struct nvram_24xx *nv)
+{
+#ifdef CONFIG_SPARC
+	struct qla_hw_data *ha = vha->hw;
+	struct pci_dev *pdev = ha->pdev;
+	struct device_node *dp = pci_device_to_OF_node(pdev);
+	const u8 *val;
+	int len;
+
+	val = of_get_property(dp, "port-wwn", &len);
+	if (val && len >= WWN_SIZE)
+		memcpy(nv->port_name, val, WWN_SIZE);
+
+	val = of_get_property(dp, "node-wwn", &len);
+	if (val && len >= WWN_SIZE)
+		memcpy(nv->node_name, val, WWN_SIZE);
+#endif
+}
+
+int
+qla24xx_nvram_config(scsi_qla_host_t *vha)
+{
+	int   rval;
+	struct init_cb_24xx *icb;
+	struct nvram_24xx *nv;
+	uint32_t *dptr;
+	uint8_t  *dptr1, *dptr2;
+	uint32_t chksum;
+	uint16_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+	icb = (struct init_cb_24xx *)ha->init_cb;
+	nv = ha->nvram;
+
+	/* Determine NVRAM starting address. */
+	if (ha->port_no == 0) {
+		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
+		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
+	} else {
+		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
+		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
+	}
+
+	ha->nvram_size = sizeof(struct nvram_24xx);
+	ha->vpd_size = FA_NVRAM_VPD_SIZE;
+
+	/* Get VPD data into cache */
+	ha->vpd = ha->nvram + VPD_OFFSET;
+	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
+	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
+
+	/* Get NVRAM data into cache and calculate checksum. */
+	dptr = (uint32_t *)nv;
+	ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
+	    ha->nvram_size);
+	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+		chksum += le32_to_cpu(*dptr);
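+	/* A valid NVRAM image checksums to zero over all LE dwords. */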
+
+	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
+	    "Contents of NVRAM\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
+	    (uint8_t *)nv, ha->nvram_size);
+
+	/* Bad NVRAM data, set default parameters. */
+	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
+	    || nv->id[3] != ' ' ||
+	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+		/* Reset NVRAM data. */
+		ql_log(ql_log_warn, vha, 0x006b,
+		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
+		    "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
+		ql_log(ql_log_warn, vha, 0x006c,
+		    "Falling back to functioning (yet invalid -- WWPN) "
+		    "defaults.\n");
+
+		/*
+		 * Set default initialization control block.
+		 */
+		memset(nv, 0, ha->nvram_size);
+		nv->nvram_version = cpu_to_le16(ICB_VERSION);
+		nv->version = cpu_to_le16(ICB_VERSION);
+		nv->frame_payload_size = 2048;
+		nv->execution_throttle = cpu_to_le16(0xFFFF);
+		nv->exchange_count = cpu_to_le16(0);
+		nv->hard_address = cpu_to_le16(124);
+		nv->port_name[0] = 0x21;
+		nv->port_name[1] = 0x00 + ha->port_no + 1;
+		nv->port_name[2] = 0x00;
+		nv->port_name[3] = 0xe0;
+		nv->port_name[4] = 0x8b;
+		nv->port_name[5] = 0x1c;
+		nv->port_name[6] = 0x55;
+		nv->port_name[7] = 0x86;
+		nv->node_name[0] = 0x20;
+		nv->node_name[1] = 0x00;
+		nv->node_name[2] = 0x00;
+		nv->node_name[3] = 0xe0;
+		nv->node_name[4] = 0x8b;
+		nv->node_name[5] = 0x1c;
+		nv->node_name[6] = 0x55;
+		nv->node_name[7] = 0x86;
+		qla24xx_nvram_wwn_from_ofw(vha, nv);
+		nv->login_retry_count = cpu_to_le16(8);
+		nv->interrupt_delay_timer = cpu_to_le16(0);
+		nv->login_timeout = cpu_to_le16(0);
+		nv->firmware_options_1 =
+		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+		nv->firmware_options_2 = cpu_to_le32(2 << 4);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		nv->firmware_options_3 = cpu_to_le32(2 << 13);
+		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
+		nv->efi_parameters = cpu_to_le32(0);
+		nv->reset_delay = 5;
+		nv->max_luns_per_target = cpu_to_le16(128);
+		nv->port_down_retry_count = cpu_to_le16(30);
+		nv->link_down_timeout = cpu_to_le16(30);
+
+		rval = 1;
+	}
+
+	if (qla_tgt_mode_enabled(vha)) {
+		/* Don't enable full login after initial LIP */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
+		/* Don't enable LIP full login for initiator */
+		nv->host_p &= cpu_to_le32(~BIT_10);
+	}
+
+	qlt_24xx_config_nvram_stage1(vha, nv);
+
+	/* Reset Initialization control block */
+	memset(icb, 0, ha->init_cb_size);
+
+	/* Copy 1st segment. */
+	dptr1 = (uint8_t *)icb;
+	dptr2 = (uint8_t *)&nv->version;
+	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	icb->login_retry_count = nv->login_retry_count;
+	icb->link_down_on_nos = nv->link_down_on_nos;
+
+	/* Copy 2nd segment. */
+	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
+	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
+	cnt = (uint8_t *)&icb->reserved_3 -
+	    (uint8_t *)&icb->interrupt_delay_timer;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	/*
+	 * Setup driver NVRAM options.
+	 */
+	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
+	    "QLA2462");
+
+	qlt_24xx_config_nvram_stage2(vha, icb);
+
+	if (nv->host_p & cpu_to_le32(BIT_15)) {
+		/* Use alternate WWN? */
+		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+	}
+
+	/* Prepare nodename */
+	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
+		/*
+		 * Firmware will apply the following mask if the nodename was
+		 * not provided.
+		 */
+		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+		icb->node_name[0] &= 0xF0;
+	}
+
+	/* Set host adapter parameters. */
+	ha->flags.disable_risc_code_load = 0;
+	ha->flags.enable_lip_reset = 0;
+	ha->flags.enable_lip_full_login =
+	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
+	ha->flags.enable_target_reset =
+	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
+	ha->flags.enable_led_scheme = 0;
+	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
+
+	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
+	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
+	    sizeof(ha->fw_seriallink_options24));
+
+	/* save HBA serial number */
+	ha->serial0 = icb->port_name[5];
+	ha->serial1 = icb->port_name[6];
+	ha->serial2 = icb->port_name[7];
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+	icb->execution_throttle = cpu_to_le16(0xFFFF);
+
+	ha->retry_count = le16_to_cpu(nv->login_retry_count);
+
+	/* Set minimum login_timeout to 4 seconds. */
+	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
+		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
+	if (le16_to_cpu(nv->login_timeout) < 4)
+		nv->login_timeout = cpu_to_le16(4);
+	ha->login_timeout = le16_to_cpu(nv->login_timeout);
+
+	/* Set minimum RATOV to 100 tenths of a second. */
+	ha->r_a_tov = 100;
+
+	ha->loop_reset_delay = nv->reset_delay;
+
+	/* Link Down Timeout = 0:
+	 *
+	 *	When the Port Down timer expires, we will start returning
+	 *	I/Os to the OS with "DID_NO_CONNECT".
+	 *
+	 * Link Down Timeout != 0:
+	 *
+	 *	The driver waits for the link to come up after link down
+	 *	before returning I/Os to the OS with "DID_NO_CONNECT".
+	 */
+	if (le16_to_cpu(nv->link_down_timeout) == 0) {
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+	} else {
+		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - ha->link_down_timeout);
+	}
+
+	/* Need enough time to try and get the port back. */
+	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
+	if (qlport_down_retry)
+		ha->port_down_retry_count = qlport_down_retry;
+
+	/* Set login_retry_count */
+	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
+	if (ha->port_down_retry_count ==
+	    le16_to_cpu(nv->port_down_retry_count) &&
+	    ha->port_down_retry_count > 3)
+		ha->login_retry_count = ha->port_down_retry_count;
+	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+		ha->login_retry_count = ha->port_down_retry_count;
+	if (ql2xloginretrycount)
+		ha->login_retry_count = ql2xloginretrycount;
+
+	/* Enable ZIO. */
+	if (!vha->flags.init_done) {
+		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
+		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
+		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
+	}
+	icb->firmware_options_2 &= cpu_to_le32(
+	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
+	vha->flags.process_response_queue = 0;
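+	/*
+	 * ZIO (zero interrupt operation) coalesces completions in firmware;
+	 * the interrupt delay timer is programmed in 100us increments (hence
+	 * the "* 100" when the delay is logged in microseconds below).
+	 */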
+	if (ha->zio_mode != QLA_ZIO_DISABLED) {
+		ha->zio_mode = QLA_ZIO_MODE_6;
+
+		ql_log(ql_log_info, vha, 0x006f,
+		    "ZIO mode %d enabled; timer delay (%d us).\n",
+		    ha->zio_mode, ha->zio_timer * 100);
+
+		icb->firmware_options_2 |= cpu_to_le32(
+		    (uint32_t)ha->zio_mode);
+		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
+		vha->flags.process_response_queue = 1;
+	}
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0070,
+		    "NVRAM configuration failed.\n");
+	}
+	return (rval);
+}
+
+uint8_t
+qla27xx_find_valid_image(struct scsi_qla_host *vha)
+{
+	struct qla27xx_image_status pri_image_status, sec_image_status;
+	uint8_t valid_pri_image, valid_sec_image;
+	uint32_t *wptr;
+	uint32_t cnt, chksum, size;
+	struct qla_hw_data *ha = vha->hw;
+
+	valid_pri_image = valid_sec_image = 1;
+	ha->active_image = 0;
+	size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+
+	if (!ha->flt_region_img_status_pri) {
+		valid_pri_image = 0;
+		goto check_sec_image;
+	}
+
+	qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
+	    ha->flt_region_img_status_pri, size);
+
+	if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+		ql_dbg(ql_dbg_init, vha, 0x018b,
+		    "Primary image signature (0x%x) not valid\n",
+		    pri_image_status.signature);
+		valid_pri_image = 0;
+		goto check_sec_image;
+	}
+
+	wptr = (uint32_t *)(&pri_image_status);
+	cnt = size;
+
+	for (chksum = 0; cnt--; wptr++)
+		chksum += le32_to_cpu(*wptr);
+
+	if (chksum) {
+		ql_dbg(ql_dbg_init, vha, 0x018c,
+		    "Checksum validation failed for primary image (0x%x)\n",
+		    chksum);
+		valid_pri_image = 0;
+	}
+
+check_sec_image:
+	if (!ha->flt_region_img_status_sec) {
+		valid_sec_image = 0;
+		goto check_valid_image;
+	}
+
+	qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+	    ha->flt_region_img_status_sec, size);
+
+	if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+		ql_dbg(ql_dbg_init, vha, 0x018d,
+		    "Secondary image signature(0x%x) not valid\n",
+		    sec_image_status.signature);
+		valid_sec_image = 0;
+		goto check_valid_image;
+	}
+
+	wptr = (uint32_t *)(&sec_image_status);
+	cnt = size;
+	for (chksum = 0; cnt--; wptr++)
+		chksum += le32_to_cpu(*wptr);
+	if (chksum) {
+		ql_dbg(ql_dbg_init, vha, 0x018e,
+		    "Checksum validation failed for secondary image (0x%x)\n",
+		    chksum);
+		valid_sec_image = 0;
+	}
+
+check_valid_image:
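+	/*
+	 * Prefer the secondary image when both are valid and the secondary
+	 * carries the newer generation number; otherwise the primary (or
+	 * the default boot loader and firmware) is used.
+	 */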
+	if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
+		ha->active_image = QLA27XX_PRIMARY_IMAGE;
+	if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
+		if (!ha->active_image ||
+		    pri_image_status.generation_number <
+		    sec_image_status.generation_number)
+			ha->active_image = QLA27XX_SECONDARY_IMAGE;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+	    ha->active_image == 0 ? "default bootld and fw" :
+	    ha->active_image == 1 ? "primary" :
+	    ha->active_image == 2 ? "secondary" :
+	    "Invalid");
+
+	return ha->active_image;
+}
+
+static int
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+    uint32_t faddr)
+{
+	int	rval = QLA_SUCCESS;
+	int	segments, fragment;
+	uint32_t *dcode, dlen;
+	uint32_t risc_addr;
+	uint32_t risc_size;
+	uint32_t i;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	ql_dbg(ql_dbg_init, vha, 0x008b,
+	    "FW: Loading firmware from flash (%x).\n", faddr);
+
+	rval = QLA_SUCCESS;
+
+	segments = FA_RISC_CODE_SEGMENTS;
+	dcode = (uint32_t *)req->ring;
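+	/* The idle request ring is reused as a DMA-able staging buffer. */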
+	*srisc_addr = 0;
+
+	if (IS_QLA27XX(ha) &&
+	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+		faddr = ha->flt_region_fw_sec;
+
+	/* Validate firmware image by checking version. */
+	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
+	for (i = 0; i < 4; i++)
+		dcode[i] = be32_to_cpu(dcode[i]);
+	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
+	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
+	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+		dcode[3] == 0)) {
+		ql_log(ql_log_fatal, vha, 0x008c,
+		    "Unable to verify the integrity of flash firmware "
+		    "image.\n");
+		ql_log(ql_log_fatal, vha, 0x008d,
+		    "Firmware data: %08x %08x %08x %08x.\n",
+		    dcode[0], dcode[1], dcode[2], dcode[3]);
+
+		return QLA_FUNCTION_FAILED;
+	}
+
+	while (segments && rval == QLA_SUCCESS) {
+		/* Read segment's load information. */
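+		/*
+		 * Header dword 2 = RISC load address,
+		 * dword 3 = segment length in dwords.
+		 */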
+		qla24xx_read_flash_data(vha, dcode, faddr, 4);
+
+		risc_addr = be32_to_cpu(dcode[2]);
+		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
+		risc_size = be32_to_cpu(dcode[3]);
+
+		fragment = 0;
+		while (risc_size > 0 && rval == QLA_SUCCESS) {
+			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+			if (dlen > risc_size)
+				dlen = risc_size;
+
+			ql_dbg(ql_dbg_init, vha, 0x008e,
+			    "Loading risc segment@ risc addr %x "
+			    "number of dwords 0x%x offset 0x%x.\n",
+			    risc_addr, dlen, faddr);
+
+			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
+			for (i = 0; i < dlen; i++)
+				dcode[i] = swab32(dcode[i]);
+
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+			    dlen);
+			if (rval) {
+				ql_log(ql_log_fatal, vha, 0x008f,
+				    "Failed to load segment %d of firmware.\n",
+				    fragment);
+				return QLA_FUNCTION_FAILED;
+			}
+
+			faddr += dlen;
+			risc_addr += dlen;
+			risc_size -= dlen;
+			fragment++;
+		}
+
+		/* Next segment. */
+		segments--;
+	}
+
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x0161,
+	    "Loading fwdump template from %x\n", faddr);
+	qla24xx_read_flash_data(vha, dcode, faddr, 7);
+	risc_size = be32_to_cpu(dcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x0162,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0163,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0164,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	faddr += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0165,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0166,
+	    "-> template size %x bytes\n", dlen);
+	if (dlen > risc_size * sizeof(*dcode)) {
+		ql_log(ql_log_warn, vha, 0x0167,
+		    "Failed fwdump template exceeds array by %zx bytes\n",
+		    (size_t)(dlen - risc_size * sizeof(*dcode)));
+		goto default_template;
+	}
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+default_template:
+	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	dlen = qla27xx_fwdt_template_default_size();
+	ql_dbg(ql_dbg_init, vha, 0x0169,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x016a,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto failed_template;
+	}
+
+	dcode = ha->fw_dump_template;
+	risc_size = dlen / sizeof(*dcode);
+	memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = be32_to_cpu(dcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+		ql_log(ql_log_warn, vha, 0x016b,
+		    "Failed fwdump template validate\n");
+		goto failed_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+	ql_dbg(ql_dbg_init, vha, 0x016c,
+	    "-> template size %x bytes\n", dlen);
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+failed_template:
+	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+	return rval;
+}
+
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
+
+int
+qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+	int	rval;
+	int	i, fragment;
+	uint16_t *wcode, *fwcode;
+	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
+	struct fw_blob *blob;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	/* Load firmware blob. */
+	blob = qla2x00_request_firmware(vha);
+	if (!blob) {
+		ql_log(ql_log_info, vha, 0x0083,
+		    "Firmware image unavailable.\n");
+		ql_log(ql_log_info, vha, 0x0084,
+		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	rval = QLA_SUCCESS;
+
+	wcode = (uint16_t *)req->ring;
+	*srisc_addr = 0;
+	fwcode = (uint16_t *)blob->fw->data;
+	fwclen = 0;
+
+	/* Validate firmware image by checking version. */
+	if (blob->fw->size < 8 * sizeof(uint16_t)) {
+		ql_log(ql_log_fatal, vha, 0x0085,
+		    "Unable to verify integrity of firmware image (%zd).\n",
+		    blob->fw->size);
+		goto fail_fw_integrity;
+	}
+	for (i = 0; i < 4; i++)
+		wcode[i] = be16_to_cpu(fwcode[i + 4]);
+	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
+	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
+		wcode[2] == 0 && wcode[3] == 0)) {
+		ql_log(ql_log_fatal, vha, 0x0086,
+		    "Unable to verify integrity of firmware image.\n");
+		ql_log(ql_log_fatal, vha, 0x0087,
+		    "Firmware data: %04x %04x %04x %04x.\n",
+		    wcode[0], wcode[1], wcode[2], wcode[3]);
+		goto fail_fw_integrity;
+	}
+
+	seg = blob->segs;
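+	/* blob->segs is a zero-terminated list of RISC load addresses. */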
+	while (*seg && rval == QLA_SUCCESS) {
+		risc_addr = *seg;
+		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
+		risc_size = be16_to_cpu(fwcode[3]);
+
+		/* Validate firmware image size. */
+		fwclen += risc_size * sizeof(uint16_t);
+		if (blob->fw->size < fwclen) {
+			ql_log(ql_log_fatal, vha, 0x0088,
+			    "Unable to verify integrity of firmware image "
+			    "(%zd).\n", blob->fw->size);
+			goto fail_fw_integrity;
+		}
+
+		fragment = 0;
+		while (risc_size > 0 && rval == QLA_SUCCESS) {
+			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
+			if (wlen > risc_size)
+				wlen = risc_size;
+			ql_dbg(ql_dbg_init, vha, 0x0089,
+			    "Loading risc segment@ risc addr %x number of "
+			    "words 0x%x.\n", risc_addr, wlen);
+
+			for (i = 0; i < wlen; i++)
+				wcode[i] = swab16(fwcode[i]);
+
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+			    wlen);
+			if (rval) {
+				ql_log(ql_log_fatal, vha, 0x008a,
+				    "Failed to load segment %d of firmware.\n",
+				    fragment);
+				break;
+			}
+
+			fwcode += wlen;
+			risc_addr += wlen;
+			risc_size -= wlen;
+			fragment++;
+		}
+
+		/* Next segment. */
+		seg++;
+	}
+	return rval;
+
+fail_fw_integrity:
+	return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+	int	rval;
+	int	segments, fragment;
+	uint32_t *dcode, dlen;
+	uint32_t risc_addr;
+	uint32_t risc_size;
+	uint32_t i;
+	struct fw_blob *blob;
+	const uint32_t *fwcode;
+	uint32_t fwclen;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	/* Load firmware blob. */
+	blob = qla2x00_request_firmware(vha);
+	if (!blob) {
+		ql_log(ql_log_warn, vha, 0x0090,
+		    "Firmware image unavailable.\n");
+		ql_log(ql_log_warn, vha, 0x0091,
+		    "Firmware images can be retrieved from: "
+		    QLA_FW_URL ".\n");
+
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x0092,
+	    "FW: Loading via request-firmware.\n");
+
+	rval = QLA_SUCCESS;
+
+	segments = FA_RISC_CODE_SEGMENTS;
+	dcode = (uint32_t *)req->ring;
+	*srisc_addr = 0;
+	fwcode = (uint32_t *)blob->fw->data;
+	fwclen = 0;
+
+	/* Validate firmware image by checking version. */
+	if (blob->fw->size < 8 * sizeof(uint32_t)) {
+		ql_log(ql_log_fatal, vha, 0x0093,
+		    "Unable to verify integrity of firmware image (%zd).\n",
+		    blob->fw->size);
+		return QLA_FUNCTION_FAILED;
+	}
+	for (i = 0; i < 4; i++)
+		dcode[i] = be32_to_cpu(fwcode[i + 4]);
+	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
+	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
+	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+		dcode[3] == 0)) {
+		ql_log(ql_log_fatal, vha, 0x0094,
+		    "Unable to verify integrity of firmware image (%zd).\n",
+		    blob->fw->size);
+		ql_log(ql_log_fatal, vha, 0x0095,
+		    "Firmware data: %08x %08x %08x %08x.\n",
+		    dcode[0], dcode[1], dcode[2], dcode[3]);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	while (segments && rval == QLA_SUCCESS) {
+		risc_addr = be32_to_cpu(fwcode[2]);
+		*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
+		risc_size = be32_to_cpu(fwcode[3]);
+
+		/* Validate firmware image size. */
+		fwclen += risc_size * sizeof(uint32_t);
+		if (blob->fw->size < fwclen) {
+			ql_log(ql_log_fatal, vha, 0x0096,
+			    "Unable to verify integrity of firmware image "
+			    "(%zd).\n", blob->fw->size);
+			return QLA_FUNCTION_FAILED;
+		}
+
+		fragment = 0;
+		while (risc_size > 0 && rval == QLA_SUCCESS) {
+			dlen = (uint32_t)(ha->fw_transfer_size >> 2);
+			if (dlen > risc_size)
+				dlen = risc_size;
+
+			ql_dbg(ql_dbg_init, vha, 0x0097,
+			    "Loading risc segment@ risc addr %x "
+			    "number of dwords 0x%x.\n", risc_addr, dlen);
+
+			for (i = 0; i < dlen; i++)
+				dcode[i] = swab32(fwcode[i]);
+
+			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+			    dlen);
+			if (rval) {
+				ql_log(ql_log_fatal, vha, 0x0098,
+				    "Failed to load segment %d of firmware.\n",
+				    fragment);
+				return QLA_FUNCTION_FAILED;
+			}
+
+			fwcode += dlen;
+			risc_addr += dlen;
+			risc_size -= dlen;
+			fragment++;
+		}
+
+		/* Next segment. */
+		segments--;
+	}
+
+	if (!IS_QLA27XX(ha))
+		return rval;
+
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	ql_dbg(ql_dbg_init, vha, 0x0171,
+	    "Loading fwdump template from %x\n",
+	    (uint32_t)((void *)fwcode - (void *)blob->fw->data));
+	risc_size = be32_to_cpu(fwcode[2]);
+	ql_dbg(ql_dbg_init, vha, 0x0172,
+	    "-> array size %x dwords\n", risc_size);
+	if (risc_size == 0 || risc_size == ~0)
+		goto default_template;
+
+	dlen = (risc_size - 8) * sizeof(*fwcode);
+	ql_dbg(ql_dbg_init, vha, 0x0173,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x0174,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto default_template;
+	}
+
+	fwcode += 7;
+	risc_size -= 8;
+	dcode = ha->fw_dump_template;
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = le32_to_cpu(fwcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(dcode)) {
+		ql_log(ql_log_warn, vha, 0x0175,
+		    "Failed fwdump template validate\n");
+		goto default_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(dcode);
+	ql_dbg(ql_dbg_init, vha, 0x0176,
+	    "-> template size %x bytes\n", dlen);
+	if (dlen > risc_size * sizeof(*fwcode)) {
+		ql_log(ql_log_warn, vha, 0x0177,
+		    "Failed fwdump template exceeds array by %zx bytes\n",
+		    (size_t)(dlen - risc_size * sizeof(*fwcode)));
+		goto default_template;
+	}
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+default_template:
+	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+
+	dlen = qla27xx_fwdt_template_default_size();
+	ql_dbg(ql_dbg_init, vha, 0x0179,
+	    "-> template allocating %x bytes...\n", dlen);
+	ha->fw_dump_template = vmalloc(dlen);
+	if (!ha->fw_dump_template) {
+		ql_log(ql_log_warn, vha, 0x017a,
+		    "Failed fwdump template allocate %x bytes.\n", risc_size);
+		goto failed_template;
+	}
+
+	dcode = ha->fw_dump_template;
+	risc_size = dlen / sizeof(*fwcode);
+	fwcode = qla27xx_fwdt_template_default();
+	for (i = 0; i < risc_size; i++)
+		dcode[i] = be32_to_cpu(fwcode[i]);
+
+	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
+		ql_log(ql_log_warn, vha, 0x017b,
+		    "Failed fwdump template validate\n");
+		goto failed_template;
+	}
+
+	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
+	ql_dbg(ql_dbg_init, vha, 0x017c,
+	    "-> template size %x bytes\n", dlen);
+	ha->fw_dump_template_len = dlen;
+	return rval;
+
+failed_template:
+	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+	return rval;
+}
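+
+/*
+ * Note on the fwdump template handling implemented above: a template
+ * carried in the firmware blob is preferred; if it is absent or fails
+ * validation, the driver falls back to the built-in default template,
+ * and if that also fails no template is installed at all.
+ */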
+
+int
+qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+	int rval;
+
+	if (ql2xfwloadbin == 1)
+		return qla81xx_load_risc(vha, srisc_addr);
+
+	/*
+	 * FW Load priority:
+	 * 1) Firmware via request-firmware interface (.bin file).
+	 * 2) Firmware residing in flash.
+	 */
+	rval = qla24xx_load_risc_blob(vha, srisc_addr);
+	if (rval == QLA_SUCCESS)
+		return rval;
+
+	return qla24xx_load_risc_flash(vha, srisc_addr,
+	    vha->hw->flt_region_fw);
+}
+
+int
+qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ql2xfwloadbin == 2)
+		goto try_blob_fw;
+
+	/*
+	 * FW Load priority:
+	 * 1) Firmware residing in flash.
+	 * 2) Firmware via request-firmware interface (.bin file).
+	 * 3) Golden-Firmware residing in flash -- limited operation.
+	 */
+	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
+	if (rval == QLA_SUCCESS)
+		return rval;
+
+try_blob_fw:
+	rval = qla24xx_load_risc_blob(vha, srisc_addr);
+	if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+		return rval;
+
+	ql_log(ql_log_info, vha, 0x0099,
+	    "Attempting to fallback to golden firmware.\n");
+	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
+	ha->flags.running_gold_fw = 1;
+	return rval;
+}
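+
+/*
+ * Net effect of the ql2xfwloadbin module parameter as implemented in
+ * the two load routines above: the default keeps each ISP family's
+ * native load order, ql2xfwloadbin == 1 forces the flash-first order,
+ * and ql2xfwloadbin == 2 forces the request-firmware-first order.
+ */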
+
+void
+qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
+{
+	int ret, retries;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->flags.pci_channel_io_perm_failure)
+		return;
+	if (!IS_FWI2_CAPABLE(ha))
+		return;
+	if (!ha->fw_major_version)
+		return;
+	if (!ha->flags.fw_started)
+		return;
+
+	ret = qla2x00_stop_firmware(vha);
+	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
+	    ret != QLA_INVALID_COMMAND && retries; retries--) {
+		ha->isp_ops->reset_chip(vha);
+		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
+			continue;
+		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
+			continue;
+		ql_log(ql_log_info, vha, 0x8015,
+		    "Attempting retry of stop-firmware command.\n");
+		ret = qla2x00_stop_firmware(vha);
+	}
+
+	QLA_FW_STOPPED(ha);
+	ha->flags.fw_init_done = 0;
+}
+
+int
+qla24xx_configure_vhba(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	int rval2;
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct req_que *req;
+	struct rsp_que *rsp;
+
+	if (!vha->vp_idx)
+		return -EINVAL;
+
+	rval = qla2x00_fw_ready(base_vha);
+	if (vha->qpair)
+		req = vha->qpair->req;
+	else
+		req = ha->req_q_map[0];
+	rsp = req->rsp;
+
+	if (rval == QLA_SUCCESS) {
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+	}
+
+	vha->flags.management_server_logged_in = 0;
+
+	/* Login to the SNS (directory server at well-known address FFFFFC) first */
+	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
+	    BIT_1);
+	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
+			ql_dbg(ql_dbg_init, vha, 0x0120,
+			    "Failed SNS login: loop_id=%x, rval2=%d\n",
+			    NPH_SNS, rval2);
+		else
+			ql_dbg(ql_dbg_init, vha, 0x0103,
+			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
+		return (QLA_FUNCTION_FAILED);
+	}
+
+	atomic_set(&vha->loop_down_timer, 0);
+	atomic_set(&vha->loop_state, LOOP_UP);
+	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	rval = qla2x00_loop_resync(base_vha);
+
+	return rval;
+}
+
+/* 84XX Support **************************************************************/
+
+static LIST_HEAD(qla_cs84xx_list);
+static DEFINE_MUTEX(qla_cs84xx_mutex);
+
+static struct qla_chip_state_84xx *
+qla84xx_get_chip(struct scsi_qla_host *vha)
+{
+	struct qla_chip_state_84xx *cs84xx;
+	struct qla_hw_data *ha = vha->hw;
+
+	mutex_lock(&qla_cs84xx_mutex);
+
+	/* Find any shared 84xx chip. */
+	list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
+		if (cs84xx->bus == ha->pdev->bus) {
+			kref_get(&cs84xx->kref);
+			goto done;
+		}
+	}
+
+	cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
+	if (!cs84xx)
+		goto done;
+
+	kref_init(&cs84xx->kref);
+	spin_lock_init(&cs84xx->access_lock);
+	mutex_init(&cs84xx->fw_update_mutex);
+	cs84xx->bus = ha->pdev->bus;
+
+	list_add_tail(&cs84xx->list, &qla_cs84xx_list);
+done:
+	mutex_unlock(&qla_cs84xx_mutex);
+	return cs84xx;
+}
+
+static void
+__qla84xx_chip_release(struct kref *kref)
+{
+	struct qla_chip_state_84xx *cs84xx =
+	    container_of(kref, struct qla_chip_state_84xx, kref);
+
+	mutex_lock(&qla_cs84xx_mutex);
+	list_del(&cs84xx->list);
+	mutex_unlock(&qla_cs84xx_mutex);
+	kfree(cs84xx);
+}
+
+void
+qla84xx_put_chip(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	if (ha->cs84xx)
+		kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
+}
+
+static int
+qla84xx_init_chip(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint16_t status[2];
+	struct qla_hw_data *ha = vha->hw;
+
+	mutex_lock(&ha->cs84xx->fw_update_mutex);
+
+	rval = qla84xx_verify_chip(vha, status);
+
+	mutex_unlock(&ha->cs84xx->fw_update_mutex);
+
+	return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
+	    QLA_SUCCESS;
+}
+
+/* 81XX Support **************************************************************/
+
+int
+qla81xx_nvram_config(scsi_qla_host_t *vha)
+{
+	int   rval;
+	struct init_cb_81xx *icb;
+	struct nvram_81xx *nv;
+	uint32_t *dptr;
+	uint8_t  *dptr1, *dptr2;
+	uint32_t chksum;
+	uint16_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+	icb = (struct init_cb_81xx *)ha->init_cb;
+	nv = ha->nvram;
+
+	/* Determine NVRAM starting address. */
+	ha->nvram_size = sizeof(struct nvram_81xx);
+	ha->vpd_size = FA_NVRAM_VPD_SIZE;
+	if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+		ha->vpd_size = FA_VPD_SIZE_82XX;
+
+	/* Get VPD data into cache */
+	ha->vpd = ha->nvram + VPD_OFFSET;
+	ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+	    ha->vpd_size);
+
+	/* Get NVRAM data into cache and calculate checksum. */
+	ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+	    ha->nvram_size);
+	dptr = (uint32_t *)nv;
+	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+		chksum += le32_to_cpu(*dptr);
+
+	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
+	    "Contents of NVRAM:\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
+	    (uint8_t *)nv, ha->nvram_size);
+
+	/* Bad NVRAM data, set default parameters. */
+	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
+	    || nv->id[3] != ' ' ||
+	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
+		/* Reset NVRAM data. */
+		ql_log(ql_log_info, vha, 0x0073,
+		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
+		    "version=0x%x.\n", chksum, nv->id[0],
+		    le16_to_cpu(nv->nvram_version));
+		ql_log(ql_log_info, vha, 0x0074,
+		    "Falling back to functioning (yet invalid -- WWPN) "
+		    "defaults.\n");
+
+		/*
+		 * Set default initialization control block.
+		 */
+		memset(nv, 0, ha->nvram_size);
+		nv->nvram_version = cpu_to_le16(ICB_VERSION);
+		nv->version = cpu_to_le16(ICB_VERSION);
+		nv->frame_payload_size = 2048;
+		nv->execution_throttle = cpu_to_le16(0xFFFF);
+		nv->exchange_count = cpu_to_le16(0);
+		nv->port_name[0] = 0x21;
+		nv->port_name[1] = 0x00 + ha->port_no + 1;
+		nv->port_name[2] = 0x00;
+		nv->port_name[3] = 0xe0;
+		nv->port_name[4] = 0x8b;
+		nv->port_name[5] = 0x1c;
+		nv->port_name[6] = 0x55;
+		nv->port_name[7] = 0x86;
+		nv->node_name[0] = 0x20;
+		nv->node_name[1] = 0x00;
+		nv->node_name[2] = 0x00;
+		nv->node_name[3] = 0xe0;
+		nv->node_name[4] = 0x8b;
+		nv->node_name[5] = 0x1c;
+		nv->node_name[6] = 0x55;
+		nv->node_name[7] = 0x86;
+		nv->login_retry_count = cpu_to_le16(8);
+		nv->interrupt_delay_timer = cpu_to_le16(0);
+		nv->login_timeout = cpu_to_le16(0);
+		nv->firmware_options_1 =
+		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+		nv->firmware_options_2 = cpu_to_le32(2 << 4);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		nv->firmware_options_3 = cpu_to_le32(2 << 13);
+		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
+		nv->efi_parameters = cpu_to_le32(0);
+		nv->reset_delay = 5;
+		nv->max_luns_per_target = cpu_to_le16(128);
+		nv->port_down_retry_count = cpu_to_le16(30);
+		nv->link_down_timeout = cpu_to_le16(180);
+		nv->enode_mac[0] = 0x00;
+		nv->enode_mac[1] = 0xC0;
+		nv->enode_mac[2] = 0xDD;
+		nv->enode_mac[3] = 0x04;
+		nv->enode_mac[4] = 0x05;
+		nv->enode_mac[5] = 0x06 + ha->port_no + 1;
+
+		rval = 1;
+	}
+
+	if (IS_T10_PI_CAPABLE(ha))
+		nv->frame_payload_size &= ~7;
+
+	qlt_81xx_config_nvram_stage1(vha, nv);
+
+	/* Reset Initialization control block */
+	memset(icb, 0, ha->init_cb_size);
+
+	/* Copy 1st segment. */
+	dptr1 = (uint8_t *)icb;
+	dptr2 = (uint8_t *)&nv->version;
+	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	icb->login_retry_count = nv->login_retry_count;
+
+	/* Copy 2nd segment. */
+	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
+	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
+	cnt = (uint8_t *)&icb->reserved_5 -
+	    (uint8_t *)&icb->interrupt_delay_timer;
+	while (cnt--)
+		*dptr1++ = *dptr2++;
+
+	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
+	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
+	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
+		icb->enode_mac[0] = 0x00;
+		icb->enode_mac[1] = 0xC0;
+		icb->enode_mac[2] = 0xDD;
+		icb->enode_mac[3] = 0x04;
+		icb->enode_mac[4] = 0x05;
+		icb->enode_mac[5] = 0x06 + ha->port_no + 1;
+	}
+
+	/* Use extended-initialization control block. */
+	memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
+
+	/*
+	 * Setup driver NVRAM options.
+	 */
+	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
+	    "QLE8XXX");
+
+	qlt_81xx_config_nvram_stage2(vha, icb);
+
+	/* Use alternate WWN? */
+	if (nv->host_p & cpu_to_le32(BIT_15)) {
+		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
+		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
+	}
+
+	/* Prepare nodename */
+	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
+		/*
+		 * Firmware will apply the following mask if the nodename was
+		 * not provided.
+		 */
+		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
+		icb->node_name[0] &= 0xF0;
+	}
+
+	/* Set host adapter parameters. */
+	ha->flags.disable_risc_code_load = 0;
+	ha->flags.enable_lip_reset = 0;
+	ha->flags.enable_lip_full_login =
+	    le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
+	ha->flags.enable_target_reset =
+	    le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
+	ha->flags.enable_led_scheme = 0;
+	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
+
+	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
+	    (BIT_6 | BIT_5 | BIT_4)) >> 4;
+
+	/* save HBA serial number */
+	ha->serial0 = icb->port_name[5];
+	ha->serial1 = icb->port_name[6];
+	ha->serial2 = icb->port_name[7];
+	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
+
+	icb->execution_throttle = cpu_to_le16(0xFFFF);
+
+	ha->retry_count = le16_to_cpu(nv->login_retry_count);
+
+	/* Set minimum login_timeout to 4 seconds. */
+	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
+		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
+	if (le16_to_cpu(nv->login_timeout) < 4)
+		nv->login_timeout = cpu_to_le16(4);
+	ha->login_timeout = le16_to_cpu(nv->login_timeout);
+
+	/* Set minimum RATOV to 100 tenths of a second. */
+	ha->r_a_tov = 100;
+
+	ha->loop_reset_delay = nv->reset_delay;
+
+	/* Link Down Timeout = 0:
+	 *
+	 *	When Port Down timer expires we will start returning
+	 *	I/O's to OS with "DID_NO_CONNECT".
+	 *
+	 * Link Down Timeout != 0:
+	 *
+	 *	 The driver waits for the link to come up after link down
+	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
+	 */
+	if (le16_to_cpu(nv->link_down_timeout) == 0) {
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
+	} else {
+		ha->link_down_timeout =	le16_to_cpu(nv->link_down_timeout);
+		ha->loop_down_abort_time =
+		    (LOOP_DOWN_TIME - ha->link_down_timeout);
+	}
+
+	/* Need enough time to try and get the port back. */
+	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
+	if (qlport_down_retry)
+		ha->port_down_retry_count = qlport_down_retry;
+
+	/* Set login_retry_count */
+	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
+	if (ha->port_down_retry_count ==
+	    le16_to_cpu(nv->port_down_retry_count) &&
+	    ha->port_down_retry_count > 3)
+		ha->login_retry_count = ha->port_down_retry_count;
+	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
+		ha->login_retry_count = ha->port_down_retry_count;
+	if (ql2xloginretrycount)
+		ha->login_retry_count = ql2xloginretrycount;
+
+	/* if not running MSI-X we need handshaking on interrupts */
+	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
+		icb->firmware_options_2 |= cpu_to_le32(BIT_22);
+
+	/* Enable ZIO. */
+	if (!vha->flags.init_done) {
+		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
+		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
+		    le16_to_cpu(icb->interrupt_delay_timer): 2;
+	}
+	icb->firmware_options_2 &= cpu_to_le32(
+	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
+	if (ha->zio_mode != QLA_ZIO_DISABLED) {
+		ha->zio_mode = QLA_ZIO_MODE_6;
+
+		ql_log(ql_log_info, vha, 0x0075,
+		    "ZIO mode %d enabled; timer delay (%d us).\n",
+		    ha->zio_mode,
+		    ha->zio_timer * 100);
+
+		icb->firmware_options_2 |= cpu_to_le32(
+		    (uint32_t)ha->zio_mode);
+		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
+	}
+
+	/* Enable RIDA Format2. */
+	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
+		icb->firmware_options_3 |= BIT_0;
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0076,
+		    "NVRAM configuration failed.\n");
+	}
+	return (rval);
+}
+
+int
+qla82xx_restart_isp(scsi_qla_host_t *vha)
+{
+	int status, rval;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+	struct scsi_qla_host *vp;
+	unsigned long flags;
+
+	status = qla2x00_init_rings(vha);
+	if (!status) {
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		ha->flags.chip_reset_done = 1;
+
+		status = qla2x00_fw_ready(vha);
+		if (!status) {
+			/* Issue a marker after FW becomes ready. */
+			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
+			vha->flags.online = 1;
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		}
+
+		/* if no cable then assume it's good */
+		if ((vha->device_flags & DFLG_NO_CABLE))
+			status = 0;
+	}
+
+	if (!status) {
+		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+		if (!atomic_read(&vha->loop_down_timer)) {
+			/*
+			 * Issue marker command only when we are going
+			 * to start the I/O .
+			 */
+			vha->marker_needed = 1;
+		}
+
+		ha->isp_ops->enable_intrs(ha);
+
+		ha->isp_abort_cnt = 0;
+		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+
+		/* Update the firmware version */
+		status = qla82xx_check_md_needed(vha);
+
+		if (ha->fce) {
+			ha->flags.fce_enabled = 1;
+			memset(ha->fce, 0,
+			    fce_calc_size(ha->fce_bufs));
+			rval = qla2x00_enable_fce_trace(vha,
+			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
+			    &ha->fce_bufs);
+			if (rval) {
+				ql_log(ql_log_warn, vha, 0x8001,
+				    "Unable to reinitialize FCE (%d).\n",
+				    rval);
+				ha->flags.fce_enabled = 0;
+			}
+		}
+
+		if (ha->eft) {
+			memset(ha->eft, 0, EFT_SIZE);
+			rval = qla2x00_enable_eft_trace(vha,
+			    ha->eft_dma, EFT_NUM_BUFFERS);
+			if (rval) {
+				ql_log(ql_log_warn, vha, 0x8010,
+				    "Unable to reinitialize EFT (%d).\n",
+				    rval);
+			}
+		}
+	}
+
+	if (!status) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8011,
+		    "qla82xx_restart_isp succeeded.\n");
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		list_for_each_entry(vp, &ha->vp_list, list) {
+			if (vp->vp_idx) {
+				atomic_inc(&vp->vref_count);
+				spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+				qla2x00_vp_abort_isp(vp);
+
+				spin_lock_irqsave(&ha->vport_slock, flags);
+				atomic_dec(&vp->vref_count);
+			}
+		}
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	} else {
+		ql_log(ql_log_warn, vha, 0x8016,
+		    "qla82xx_restart_isp **** FAILED ****.\n");
+	}
+
+	return status;
+}
+
+void
+qla81xx_update_fw_options(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/*  Hold status IOCBs until ABTS response received. */
+	if (ql2xfwholdabts)
+		ha->fw_options[3] |= BIT_12;
+
+	/* Set Retry FLOGI in case of P2P connection */
+	if (ha->operating_mode == P2P) {
+		ha->fw_options[2] |= BIT_3;
+		ql_dbg(ql_dbg_disc, vha, 0x2103,
+		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
+			__func__, ha->fw_options[2]);
+	}
+
+	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
+	if (ql2xmvasynctoatio) {
+		if (qla_tgt_mode_enabled(vha) ||
+		    qla_dual_mode_enabled(vha))
+			ha->fw_options[2] |= BIT_11;
+		else
+			ha->fw_options[2] &= ~BIT_11;
+	}
+
+	if (qla_tgt_mode_enabled(vha) ||
+	    qla_dual_mode_enabled(vha)) {
+		/* Let the FW auto-send SCSI status (status byte set below) */
+		ha->fw_options[1] |= BIT_8;
+		ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
+
+		/* FW perform Exchange validation */
+		ha->fw_options[2] |= BIT_4;
+	} else {
+		ha->fw_options[1]  &= ~BIT_8;
+		ha->fw_options[10] &= 0x00ff;
+
+		ha->fw_options[2] &= ~BIT_4;
+	}
+
+	if (ql2xetsenable) {
+		/* Enable ETS Burst. */
+		memset(ha->fw_options, 0, sizeof(ha->fw_options));
+		ha->fw_options[2] |= BIT_9;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x00e9,
+	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+	    __func__, ha->fw_options[1], ha->fw_options[2],
+	    ha->fw_options[3], vha->host->active_mode);
+
+	qla2x00_set_fw_options(vha, ha->fw_options);
+}
+
+/*
+ * qla24xx_get_fcp_prio
+ *	Gets the fcp cmd priority value for the logged in port.
+ *	Looks for a match of the port descriptors within
+ *	each of the fcp prio config entries. If a match is found,
+ *	the tag (priority) value is returned.
+ *
+ * Input:
+ *	vha = scsi host structure pointer.
+ *	fcport = port structure pointer.
+ *
+ * Return:
+ *	non-zero (if found)
+ *	-1 (if not found)
+ *
+ * Context:
+ * 	Kernel context
+ */
+static int
+qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int i, entries;
+	uint8_t pid_match, wwn_match;
+	int priority;
+	uint32_t pid1, pid2;
+	uint64_t wwn1, wwn2;
+	struct qla_fcp_prio_entry *pri_entry;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
+		return -1;
+
+	priority = -1;
+	entries = ha->fcp_prio_cfg->num_entries;
+	pri_entry = &ha->fcp_prio_cfg->entry[0];
+
+	for (i = 0; i < entries; i++) {
+		pid_match = wwn_match = 0;
+
+		if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
+			pri_entry++;
+			continue;
+		}
+
+		/* check source pid for a match */
+		if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
+			pid1 = pri_entry->src_pid & INVALID_PORT_ID;
+			pid2 = vha->d_id.b24 & INVALID_PORT_ID;
+			if (pid1 == INVALID_PORT_ID)
+				pid_match++;
+			else if (pid1 == pid2)
+				pid_match++;
+		}
+
+		/* check destination pid for a match */
+		if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
+			pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
+			pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
+			if (pid1 == INVALID_PORT_ID)
+				pid_match++;
+			else if (pid1 == pid2)
+				pid_match++;
+		}
+
+		/* check source WWN for a match */
+		if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
+			wwn1 = wwn_to_u64(vha->port_name);
+			wwn2 = wwn_to_u64(pri_entry->src_wwpn);
+			if (wwn2 == (uint64_t)-1)
+				wwn_match++;
+			else if (wwn1 == wwn2)
+				wwn_match++;
+		}
+
+		/* check destination WWN for a match */
+		if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
+			wwn1 = wwn_to_u64(fcport->port_name);
+			wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
+			if (wwn2 == (uint64_t)-1)
+				wwn_match++;
+			else if (wwn1 == wwn2)
+				wwn_match++;
+		}
+
+		if (pid_match == 2 || wwn_match == 2) {
+			/* Found a matching entry */
+			if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
+				priority = pri_entry->tag;
+			break;
+		}
+
+		pri_entry++;
+	}
+
+	return priority;
+}
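+
+/*
+ * Matching note: an entry applies only when both halves of one
+ * criterion match (pid_match == 2 or wwn_match == 2), i.e. both the
+ * source and destination port IDs, or both the source and destination
+ * WWPNs.  A port ID of all ones (INVALID_PORT_ID) or an all-ones WWPN
+ * acts as a wildcard.
+ */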
+
+/*
+ * qla24xx_update_fcport_fcp_prio
+ *	Activates fcp priority for the logged in fc port
+ *
+ * Input:
+ *	vha = scsi host structure pointer.
+ *	fcp = port structure pointer.
+ *
+ * Return:
+ *	QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+	int ret;
+	int priority;
+	uint16_t mb[5];
+
+	if (fcport->port_type != FCT_TARGET ||
+	    fcport->loop_id == FC_NO_LOOP_ID)
+		return QLA_FUNCTION_FAILED;
+
+	priority = qla24xx_get_fcp_prio(vha, fcport);
+	if (priority < 0)
+		return QLA_FUNCTION_FAILED;
+
+	if (IS_P3P_TYPE(vha->hw)) {
+		fcport->fcp_prio = priority & 0xf;
+		return QLA_SUCCESS;
+	}
+
+	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
+	if (ret == QLA_SUCCESS) {
+		if (fcport->fcp_prio != priority)
+			ql_dbg(ql_dbg_user, vha, 0x709e,
+			    "Updated FCP_CMND priority - value=%d loop_id=%d "
+			    "port_id=%02x%02x%02x.\n", priority,
+			    fcport->loop_id, fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+		fcport->fcp_prio = priority & 0xf;
+	} else
+		ql_dbg(ql_dbg_user, vha, 0x704f,
+		    "Unable to update FCP_CMND priority - ret=0x%x for "
+		    "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa);
+	return  ret;
+}
+
+/*
+ * qla24xx_update_all_fcp_prio
+ *	Activates fcp priority for all the logged in ports
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *
+ * Return:
+ *	QLA_SUCCESS or QLA_FUNCTION_FAILED
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
+{
+	int ret;
+	fc_port_t *fcport;
+
+	ret = QLA_FUNCTION_FAILED;
+	/* We need to set priority for all logged in ports */
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
+
+	return ret;
+}
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+	int vp_idx, bool startqp)
+{
+	int rsp_id = 0;
+	int  req_id = 0;
+	int i;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t qpair_id = 0;
+	struct qla_qpair *qpair = NULL;
+	struct qla_msix_entry *msix;
+
+	if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+		ql_log(ql_log_warn, vha, 0x0181,
+		    "FW/Driver is not multi-queue capable.\n");
+		return NULL;
+	}
+
+	if (ql2xmqsupport) {
+		qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+		if (qpair == NULL) {
+			ql_log(ql_log_warn, vha, 0x0182,
+			    "Failed to allocate memory for queue pair.\n");
+			return NULL;
+		}
+
+		qpair->hw = vha->hw;
+		qpair->vha = vha;
+		qpair->qp_lock_ptr = &qpair->qp_lock;
+		spin_lock_init(&qpair->qp_lock);
+		qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
+
+		/* Assign an available queue pair id */
+		mutex_lock(&ha->mq_lock);
+		qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+		if (ha->num_qpairs >= ha->max_qpairs) {
+			mutex_unlock(&ha->mq_lock);
+			ql_log(ql_log_warn, vha, 0x0183,
+			    "No resources to create additional q pair.\n");
+			goto fail_qid_map;
+		}
+		ha->num_qpairs++;
+		set_bit(qpair_id, ha->qpair_qid_map);
+		ha->queue_pair_map[qpair_id] = qpair;
+		qpair->id = qpair_id;
+		qpair->vp_idx = vp_idx;
+		qpair->fw_started = ha->flags.fw_started;
+		INIT_LIST_HEAD(&qpair->hints_list);
+		INIT_LIST_HEAD(&qpair->nvme_done_list);
+		qpair->chip_reset = ha->base_qpair->chip_reset;
+		qpair->enable_class_2 = ha->base_qpair->enable_class_2;
+		qpair->enable_explicit_conf =
+		    ha->base_qpair->enable_explicit_conf;
+
+		for (i = 0; i < ha->msix_count; i++) {
+			msix = &ha->msix_entries[i];
+			if (msix->in_use)
+				continue;
+			qpair->msix = msix;
+			ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+			    "Vector %x selected for qpair\n", msix->vector);
+			break;
+		}
+		if (!qpair->msix) {
+			ql_log(ql_log_warn, vha, 0x0184,
+			    "Out of MSI-X vectors!\n");
+			goto fail_msix;
+		}
+
+		qpair->msix->in_use = 1;
+		list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+		qpair->pdev = ha->pdev;
+		if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+			qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
+
+		mutex_unlock(&ha->mq_lock);
+
+		/* Create response queue first */
+		rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
+		if (!rsp_id) {
+			ql_log(ql_log_warn, vha, 0x0185,
+			    "Failed to create response queue.\n");
+			goto fail_rsp;
+		}
+
+		qpair->rsp = ha->rsp_q_map[rsp_id];
+
+		/* Create request queue */
+		req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
+		    startqp);
+		if (!req_id) {
+			ql_log(ql_log_warn, vha, 0x0186,
+			    "Failed to create request queue.\n");
+			goto fail_req;
+		}
+
+		qpair->req = ha->req_q_map[req_id];
+		qpair->rsp->req = qpair->req;
+		qpair->rsp->qpair = qpair;
+		/* init qpair to this cpu. Will adjust at run time. */
+		qla_cpu_update(qpair, smp_processor_id());
+
+		if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+			if (ha->fw_attributes & BIT_4)
+				qpair->difdix_supported = 1;
+		}
+
+		qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+		if (!qpair->srb_mempool) {
+			ql_log(ql_log_warn, vha, 0xd036,
+			    "Failed to create srb mempool for qpair %d\n",
+			    qpair->id);
+			goto fail_mempool;
+		}
+
+		/* Mark as online */
+		qpair->online = 1;
+
+		if (!vha->flags.qpairs_available)
+			vha->flags.qpairs_available = 1;
+
+		ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+		    "Request/Response queue pair created, id %d\n",
+		    qpair->id);
+		ql_dbg(ql_dbg_init, vha, 0x0187,
+		    "Request/Response queue pair created, id %d\n",
+		    qpair->id);
+	}
+	return qpair;
+
+fail_mempool:
+fail_req:
+	qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+	mutex_lock(&ha->mq_lock);
+	qpair->msix->in_use = 0;
+	list_del(&qpair->qp_list_elem);
+	if (list_empty(&vha->qp_list))
+		vha->flags.qpairs_available = 0;
+fail_msix:
+	ha->queue_pair_map[qpair_id] = NULL;
+	clear_bit(qpair_id, ha->qpair_qid_map);
+	ha->num_qpairs--;
+	mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+	kfree(qpair);
+	return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = qpair->hw;
+
+	qpair->delete_in_progress = 1;
+
+	ret = qla25xx_delete_req_que(vha, qpair->req);
+	if (ret != QLA_SUCCESS)
+		goto fail;
+
+	ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+	if (ret != QLA_SUCCESS)
+		goto fail;
+
+	mutex_lock(&ha->mq_lock);
+	ha->queue_pair_map[qpair->id] = NULL;
+	clear_bit(qpair->id, ha->qpair_qid_map);
+	ha->num_qpairs--;
+	list_del(&qpair->qp_list_elem);
+	if (list_empty(&vha->qp_list)) {
+		vha->flags.qpairs_available = 0;
+		vha->flags.qpairs_req_created = 0;
+		vha->flags.qpairs_rsp_created = 0;
+	}
+	mempool_destroy(qpair->srb_mempool);
+	kfree(qpair);
+	mutex_unlock(&ha->mq_lock);
+
+	return QLA_SUCCESS;
+fail:
+	return ret;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_inline.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_inline.h
new file mode 100644
index 0000000..3f5a0f0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_inline.h
@@ -0,0 +1,371 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include "qla_target.h"
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @vha: HA context
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 1) {
+		iocbs += (dsds - 1) / 5;
+		if ((dsds - 1) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
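+
+/*
+ * Illustrative example (not in the original source): with dsds = 12,
+ * the command IOCB carries one DSD and each Continuation Type 1 IOCB
+ * carries five more, so the 11 leftover DSDs need 11 / 5 = 2 full
+ * continuations plus one partial, giving iocbs = 4.
+ */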
+
+/*
+ * qla2x00_debounce_register
+ *      Debounce register.
+ *
+ * Input:
+ *      port = register address.
+ *
+ * Returns:
+ *      register value.
+ */
+static __inline__ uint16_t
+qla2x00_debounce_register(volatile uint16_t __iomem *addr)
+{
+	volatile uint16_t first;
+	volatile uint16_t second;
+
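+	/* Accept a value only once two consecutive reads agree. */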
+	do {
+		first = RD_REG_WORD(addr);
+		barrier();
+		cpu_relax();
+		second = RD_REG_WORD(addr);
+	} while (first != second);
+
+	return (first);
+}
+
+static inline void
+qla2x00_poll(struct rsp_que *rsp)
+{
+	unsigned long flags;
+	struct qla_hw_data *ha = rsp->hw;
+	local_irq_save(flags);
+	if (IS_P3P_TYPE(ha))
+		qla82xx_poll(0, rsp);
+	else
+		ha->isp_ops->intr_handler(0, rsp);
+	local_irq_restore(flags);
+}
+
+static inline uint8_t *
+host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
+{
+	uint32_t *ifcp = (uint32_t *) fcp;
+	uint32_t *ofcp = (uint32_t *) fcp;
+	uint32_t iter = bsize >> 2;
+
+	for (; iter; iter--)
+		*ofcp++ = swab32(*ifcp++);
+
+	return fcp;
+}
+
+static inline void
+host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
+{
+	uint32_t *isrc = (uint32_t *) src;
+	__le32 *odest = (__le32 *) dst;
+	uint32_t iter = bsize >> 2;
+
+	for ( ; iter--; isrc++)
+		*odest++ = cpu_to_le32(*isrc);
+}
+
+static inline void
+qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
+{
+	int i;
+
+	if (IS_FWI2_CAPABLE(ha))
+		return;
+
+	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+		set_bit(i, ha->loop_id_map);
+	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+	set_bit(BROADCAST, ha->loop_id_map);
+}
+
+static inline int
+qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	if (IS_FWI2_CAPABLE(ha))
+		return (loop_id > NPH_LAST_HANDLE);
+
+	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
+}
+
+static inline void
+qla2x00_clear_loop_id(fc_port_t *fcport)
+{
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	if (fcport->loop_id == FC_NO_LOOP_ID ||
+	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+		return;
+
+	clear_bit(fcport->loop_id, ha->loop_id_map);
+	fcport->loop_id = FC_NO_LOOP_ID;
+}
+
+static inline void
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
+{
+	struct dsd_dma *dsd, *tdsd;
+
+	/* clean up allocated prev pool */
+	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
+		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
+		    dsd->dsd_list_dma);
+		list_del(&dsd->list);
+		kfree(dsd);
+	}
+	INIT_LIST_HEAD(&ctx->dsd_list);
+}
+
+static inline void
+qla2x00_set_fcport_state(fc_port_t *fcport, int state)
+{
+	int old_state;
+
+	old_state = atomic_read(&fcport->state);
+	atomic_set(&fcport->state, state);
+
+	/* Don't print state transitions during initial allocation of fcport */
+	if (old_state && old_state != state) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+		    "FCPort %8phC state transitioned from %s to %s - "
+			"portid=%02x%02x%02x.\n", fcport->port_name,
+		    port_state_str[old_state], port_state_str[state],
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa);
+	}
+}
+
+static inline int
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+	/*
+	 * Uncomment when corresponding SCSI changes are done.
+	 *
+	if (!sp->cmd->prot_chk)
+		return 0;
+	 *
+	 */
+	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+		if (ql2xenablehba_err_chk >= 1)
+			return 1;
+		break;
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+		if (ql2xenablehba_err_chk >= 2)
+			return 1;
+		break;
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+		return 1;
+	}
+	return 0;
+}
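+
+/*
+ * Summary of the policy above: READ_INSERT and WRITE_STRIP always get
+ * HBA error checking; READ_STRIP and WRITE_INSERT require
+ * ql2xenablehba_err_chk >= 1; READ_PASS and WRITE_PASS require
+ * ql2xenablehba_err_chk >= 2.
+ */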
+
+static inline int
+qla2x00_reset_active(scsi_qla_host_t *vha)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+
+	/* Test appropriate base-vha and vha flags. */
+	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
+	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+}
+
+static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+	srb_t *sp = NULL;
+	uint8_t bail;
+
+	QLA_QPAIR_MARK_BUSY(qpair, bail);
+	if (unlikely(bail))
+		return NULL;
+
+	sp = mempool_alloc(qpair->srb_mempool, flag);
+	if (!sp)
+		goto done;
+
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->iocbs = 1;
+	sp->vha = qpair->vha;
+	INIT_LIST_HEAD(&sp->elem);
+
+done:
+	if (!sp)
+		QLA_QPAIR_MARK_NOT_BUSY(qpair);
+	return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+	mempool_free(sp, qpair->srb_mempool);
+	QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
+static inline srb_t *
+qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+{
+	srb_t *sp = NULL;
+	uint8_t bail;
+
+	QLA_VHA_MARK_BUSY(vha, bail);
+	if (unlikely(bail))
+		return NULL;
+
+	sp = mempool_alloc(vha->hw->srb_mempool, flag);
+	if (!sp)
+		goto done;
+
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->cmd_type = TYPE_SRB;
+	sp->iocbs = 1;
+	sp->vha = vha;
+done:
+	if (!sp)
+		QLA_VHA_MARK_NOT_BUSY(vha);
+	return sp;
+}
+
+static inline void
+qla2x00_rel_sp(srb_t *sp)
+{
+	QLA_VHA_MARK_NOT_BUSY(sp->vha);
+	mempool_free(sp, sp->vha->hw->srb_mempool);
+}
+
+static inline void
+qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+	init_timer(&sp->u.iocb_cmd.timer);
+	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
+	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
+	add_timer(&sp->u.iocb_cmd.timer);
+	sp->free = qla2x00_sp_free;
+	if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
+		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+	if (sp->type == SRB_ELS_DCMD)
+		init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
+}
+
+static inline int
+qla2x00_gid_list_size(struct qla_hw_data *ha)
+{
+	if (IS_QLAFX00(ha))
+		return sizeof(uint32_t) * 32;
+	else
+		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+}
+
+static inline void
+qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
+{
+	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+		complete(&ha->mbx_intr_comp);
+	}
+}
+
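+/*
+ * Note: the conversion below treats @retry_delay as tenths of a second
+ * (retry_delay * HZ / 10 jiffies) when computing the timestamp.
+ */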
+static inline void
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+{
+	if (retry_delay)
+		fcport->retry_delay_timestamp = jiffies +
+		    (retry_delay * HZ / 10);
+}
+
+static inline bool
+qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
+{
+	if (qla_ini_mode_enabled(vha) &&
+	    (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+		return true;
+	else if (qla_tgt_mode_enabled(vha) &&
+	    (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+		return true;
+	else if (qla_dual_mode_enabled(vha) &&
+	    ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+		return true;
+	else
+		return false;
+}
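+
+/*
+ * In other words: exchange offload is considered enabled whenever the
+ * exchange count configured for the active mode(s) (ql2xiniexchg for
+ * initiator, ql2xexchoffld for target, their sum for dual mode)
+ * exceeds the firmware default FW_DEF_EXCHANGES_CNT.
+ */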
+
+static inline void
+qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
+{
+	qpair->cpuid = cpuid;
+
+	if (!list_empty(&qpair->hints_list)) {
+		struct qla_qpair_hint *h;
+
+		list_for_each_entry(h, &qpair->hints_list, hint_elem)
+			h->cpuid = qpair->cpuid;
+	}
+}
+
+static inline struct qla_qpair_hint *
+qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
+{
+	struct qla_qpair_hint *h;
+	u16 i;
+
+	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
+		h = &tgt->qphints[i];
+		if (h->qpair == qpair)
+			return h;
+	}
+
+	return NULL;
+}
+
+static inline void
+qla_83xx_start_iocbs(struct qla_qpair *qpair)
+{
+	struct req_que *req = qpair->req;
+
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_iocb.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_iocb.c
new file mode 100644
index 0000000..85cb4e3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_iocb.c
@@ -0,0 +1,3458 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+
+#include <scsi/scsi_tcq.h>
+
+/**
+ * qla2x00_get_cmd_direction() - Determine control_flag data direction.
+ * @sp: SRB command to process
+ *
+ * Returns the proper CF_* direction based on CDB.
+ */
+static inline uint16_t
+qla2x00_get_cmd_direction(srb_t *sp)
+{
+	uint16_t cflags;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->vha;
+
+	cflags = 0;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cflags = CF_WRITE;
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cflags = CF_READ;
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
+	}
+	return (cflags);
+}
+
+/**
+ * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
+ * Continuation Type 0 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+uint16_t
+qla2x00_calc_iocbs_32(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 3) {
+		iocbs += (dsds - 3) / 7;
+		if ((dsds - 3) % 7)
+			iocbs++;
+	}
+	return (iocbs);
+}
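+
+/*
+ * Illustrative example (not in the original source): with dsds = 10,
+ * the Command Type 2 IOCB carries three DSDs and one Continuation
+ * Type 0 IOCB carries the remaining seven, so iocbs = 1 + 7 / 7 = 2.
+ */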
+
+/**
+ * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+uint16_t
+qla2x00_calc_iocbs_64(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 2) {
+		iocbs += (dsds - 2) / 5;
+		if ((dsds - 2) % 5)
+			iocbs++;
+	}
+	return (iocbs);
+}
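+
+/*
+ * Illustrative example (not in the original source): with dsds = 12,
+ * the Command Type 3 IOCB carries two DSDs and each Continuation
+ * Type 1 IOCB carries five more, so iocbs = 1 + 10 / 5 = 3.
+ */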
+
+/**
+ * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
+ * @vha: HA context
+ *
+ * Returns a pointer to the Continuation Type 0 IOCB packet.
+ */
+static inline cont_entry_t *
+qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
+{
+	cont_entry_t *cont_pkt;
+	struct req_que *req = vha->req;
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	cont_pkt = (cont_entry_t *)req->ring_ptr;
+
+	/* Load packet defaults. */
+	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
+
+	return (cont_pkt);
+}
+
+/**
+ * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
+ * @vha: HA context
+ * @req: request queue
+ *
+ * Returns a pointer to the continuation type 1 IOCB packet.
+ */
+static inline cont_a64_entry_t *
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
+{
+	cont_a64_entry_t *cont_pkt;
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+	/* Load packet defaults. */
+	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
+	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
+	    cpu_to_le32(CONTINUE_A64_TYPE);
+
+	return (cont_pkt);
+}
+
+inline int
+qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
+{
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
+
+	/* We always use DIFF Bundling for best performance */
+	*fw_prot_opts = 0;
+
+	/* Translate SCSI opcode to a protection opcode */
+	switch (scsi_get_prot_op(cmd)) {
+	case SCSI_PROT_READ_STRIP:
+		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		*fw_prot_opts |= PO_MODE_DIF_INSERT;
+		break;
+	case SCSI_PROT_READ_INSERT:
+		*fw_prot_opts |= PO_MODE_DIF_INSERT;
+		break;
+	case SCSI_PROT_WRITE_STRIP:
+		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
+		break;
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+		if (guard & SHOST_DIX_GUARD_IP)
+			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
+		else
+			*fw_prot_opts |= PO_MODE_DIF_PASS;
+		break;
+	default:	/* Normal Request */
+		*fw_prot_opts |= PO_MODE_DIF_PASS;
+		break;
+	}
+
+	return scsi_prot_sg_count(cmd);
+}
+
+/**
+ * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
+ * capable IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 2 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
+    uint16_t tot_dsds)
+{
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	scsi_qla_host_t	*vha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
+
+	cmd = GET_CMD_SP(sp);
+
+	/* Update entry type to indicate Command Type 2 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) =
+	    cpu_to_le32(COMMAND_TYPE);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		return;
+	}
+
+	vha = sp->vha;
+	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
+
+	/* Three DSDs are available in the Command Type 2 IOCB */
+	avail_dsds = 3;
+	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+	/* Load data segments */
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		cont_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Seven DSDs are available in the Continuation
+			 * Type 0 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
+			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+			avail_dsds = 7;
+		}
+
+		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+}
+
+/**
+ * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
+ * capable IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 3 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
+    uint16_t tot_dsds)
+{
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	scsi_qla_host_t	*vha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
+
+	cmd = GET_CMD_SP(sp);
+
+	/* Update entry type to indicate Command Type 3 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		return;
+	}
+
+	vha = sp->vha;
+	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
+
+	/* Two DSDs are available in the Command Type 3 IOCB */
+	avail_dsds = 2;
+	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+	/* Load data segments */
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+}
+
+/**
+ * qla2x00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2x00_start_scsi(srb_t *sp)
+{
+	int		nseg;
+	unsigned long   flags;
+	scsi_qla_host_t	*vha;
+	struct scsi_cmnd *cmd;
+	uint32_t	*clr_ptr;
+	uint32_t        index;
+	uint32_t	handle;
+	cmd_entry_t	*cmd_pkt;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct device_reg_2xxx __iomem *reg;
+	struct qla_hw_data *ha;
+	struct req_que *req;
+	struct rsp_que *rsp;
+
+	/* Setup device pointers. */
+	vha = sp->vha;
+	ha = vha->hw;
+	reg = &ha->iobase->isp;
+	cmd = GET_CMD_SP(sp);
+	req = ha->req_q_map[0];
+	rsp = ha->rsp_q_map[0];
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS) {
+			return (QLA_FUNCTION_FAILED);
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+
+	/* Calculate the number of request entries needed. */
+	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
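+	/*
+	 * If the cached count of free ring entries looks insufficient,
+	 * refresh it from the hardware out-pointer; the two extra
+	 * entries of headroom keep the ring from filling completely.
+	 */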
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+		/* If still no head room then bail out */
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Build command packet */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
+	cmd_pkt->handle = handle;
+	/* Zero out remaining portion of packet. */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Set target ID and LUN number. */
+	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
+	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
+	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
+
+	/* Load SCSI command packet. */
+	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+	/* Build IOCB segments */
+	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
+
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla2x00_process_response_queue(rsp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return (QLA_SUCCESS);
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return (QLA_FUNCTION_FAILED);
+}
+
+/**
+ * qla2x00_start_iocbs() - Execute the IOCB command.
+ * @vha: HA context
+ * @req: request queue
+ */
+void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+	struct qla_hw_data *ha = vha->hw;
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
+
+	if (IS_P3P_TYPE(ha)) {
+		qla82xx_start_iocbs(vha);
+	} else {
+		/* Adjust ring index. */
+		req->ring_index++;
+		if (req->ring_index == req->length) {
+			req->ring_index = 0;
+			req->ring_ptr = req->ring;
+		} else
+			req->ring_ptr++;
+
+		/* Set chip new ring index. */
+		if (ha->mqenable || IS_QLA27XX(ha)) {
+			WRT_REG_DWORD(req->req_q_in, req->ring_index);
+		} else if (IS_QLA83XX(ha)) {
+			WRT_REG_DWORD(req->req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+		} else if (IS_QLAFX00(ha)) {
+			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+		} else if (IS_FWI2_CAPABLE(ha)) {
+			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+		} else {
+			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+				req->ring_index);
+			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+		}
+	}
+}
+
+/**
+ * __qla2x00_marker() - Send a marker IOCB to the firmware.
+ * @vha: HA context
+ * @req: request queue
+ * @rsp: response queue
+ * @loop_id: loop ID
+ * @lun: LUN
+ * @type: marker modifier
+ *
+ * Can be called from both normal and interrupt context.
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+			struct rsp_que *rsp, uint16_t loop_id,
+			uint64_t lun, uint8_t type)
+{
+	mrk_entry_t *mrk;
+	struct mrk_entry_24xx *mrk24 = NULL;
+
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
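+	/* Markers are always queued on the base (id 0) request queue. */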
+	req = ha->req_q_map[0];
+	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
+	if (mrk == NULL) {
+		ql_log(ql_log_warn, base_vha, 0x3026,
+		    "Failed to allocate Marker IOCB.\n");
+
+		return (QLA_FUNCTION_FAILED);
+	}
+
+	mrk->entry_type = MARKER_TYPE;
+	mrk->modifier = type;
+	if (type != MK_SYNC_ALL) {
+		if (IS_FWI2_CAPABLE(ha)) {
+			mrk24 = (struct mrk_entry_24xx *) mrk;
+			mrk24->nport_handle = cpu_to_le16(loop_id);
+			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
+			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+			mrk24->vp_index = vha->vp_idx;
+			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
+		} else {
+			SET_TARGET_ID(ha, mrk->target, loop_id);
+			mrk->lun = cpu_to_le16((uint16_t)lun);
+		}
+	}
+	wmb();
+
+	qla2x00_start_iocbs(vha, req);
+
+	return (QLA_SUCCESS);
+}
+
+int
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
+		uint8_t type)
+{
+	int ret;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+	return (ret);
+}
+
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by ha_locked parameter.
+ * Might release it, then reacquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+	if (ha_locked) {
+		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+					MK_SYNC_ALL) != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+	} else {
+		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+					MK_SYNC_ALL) != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+	}
+	vha->marker_needed = 0;
+
+	return QLA_SUCCESS;
+}
+
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+	uint16_t tot_dsds)
+{
+	uint32_t *cur_dsd = NULL;
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct scsi_cmnd *cmd;
+	struct	scatterlist *cur_seg;
+	uint32_t *dsd_seg;
+	void *next_dsd;
+	uint8_t avail_dsds;
+	uint8_t first_iocb = 1;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct ct6_dsd *ctx;
+
+	cmd = GET_CMD_SP(sp);
+
+	/* Update entry type to indicate Command Type 6 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		return 0;
+	}
+
+	vha = sp->vha;
+	ha = vha->hw;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
+	}
+
+	cur_seg = scsi_sglist(cmd);
+	ctx = GET_CMD_CTX_SP(sp);
+
+	while (tot_dsds) {
+		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+		    QLA_DSDS_PER_IOCB : tot_dsds;
+		tot_dsds -= avail_dsds;
+		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+		    struct dsd_dma, list);
+		next_dsd = dsd_ptr->dsd_addr;
+		list_del(&dsd_ptr->list);
+		ha->gbl_dsd_avail--;
+		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+		ctx->dsd_use_cnt++;
+		ha->gbl_dsd_inuse++;
+
+		if (first_iocb) {
+			first_iocb = 0;
+			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+		} else {
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(dsd_list_len);
+		}
+		cur_dsd = (uint32_t *)next_dsd;
+		while (avail_dsds) {
+			dma_addr_t	sle_dma;
+
+			sle_dma = sg_dma_address(cur_seg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+			cur_seg = sg_next(cur_seg);
+			avail_dsds--;
+		}
+	}
+
+	/* Null termination */
+	*cur_dsd++ =  0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+	return 0;
+}
+
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD list required
+ * for Command Type 6.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of DSD lists needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_dsd_lists(uint16_t dsds)
+{
+	uint16_t dsd_lists = 0;
+
+	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+	if (dsds % QLA_DSDS_PER_IOCB)
+		dsd_lists++;
+	return dsd_lists;
+}
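+
+/*
+ * Illustrative example (not in the original source): assuming
+ * QLA_DSDS_PER_IOCB is 37, dsds = 50 yields 50 / 37 = 1 full list plus
+ * one partial list, i.e. dsd_lists = 2.
+ */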
+
+
+/**
+ * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
+ * IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type 7 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
+ */
+inline void
+qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
+	uint16_t tot_dsds, struct req_que *req)
+{
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	scsi_qla_host_t	*vha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
+
+	cmd = GET_CMD_SP(sp);
+
+	/* Update entry type to indicate Command Type 7 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		return;
+	}
+
+	vha = sp->vha;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
+	}
+
+	/* One DSD is available in the Command Type 7 IOCB */
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+	/* Load data segments */
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+}
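+
+/*
+ * IOCB budget sketch for the routine above: the Command Type 7 entry
+ * holds one inline DSD and each Continuation Type 1 entry holds five,
+ * so e.g. tot_dsds == 12 consumes the command entry plus
+ * ceil((12 - 1) / 5) == 3 continuation entries -- the count
+ * qla24xx_calc_iocbs() must have reserved before this is called.
+ */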
+
+struct fw_dif_context {
+	uint32_t ref_tag;
+	uint16_t app_tag;
+	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
+	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
+};
+
+/*
+ * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
+ */
+static inline void
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
+    unsigned int protcnt)
+{
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+	switch (scsi_get_prot_type(cmd)) {
+	case SCSI_PROT_DIF_TYPE0:
+		/*
+		 * No check for ql2xenablehba_err_chk, as it would be an
+		 * I/O error if hba tag generation is not done.
+		 */
+		pkt->ref_tag = cpu_to_le32((uint32_t)
+		    (0xffffffff & scsi_get_lba(cmd)));
+
+		if (!qla2x00_hba_err_chk_enabled(sp))
+			break;
+
+		pkt->ref_tag_mask[0] = 0xff;
+		pkt->ref_tag_mask[1] = 0xff;
+		pkt->ref_tag_mask[2] = 0xff;
+		pkt->ref_tag_mask[3] = 0xff;
+		break;
+
+	/*
+	 * For Type 2 protection: 16 bit GUARD tag plus a 32 bit REF tag
+	 * that must match the LBA in the CDB + N.
+	 */
+	case SCSI_PROT_DIF_TYPE2:
+		pkt->app_tag = cpu_to_le16(0);
+		pkt->app_tag_mask[0] = 0x0;
+		pkt->app_tag_mask[1] = 0x0;
+
+		pkt->ref_tag = cpu_to_le32((uint32_t)
+		    (0xffffffff & scsi_get_lba(cmd)));
+
+		if (!qla2x00_hba_err_chk_enabled(sp))
+			break;
+
+		/* enable ALL bytes of the ref tag */
+		pkt->ref_tag_mask[0] = 0xff;
+		pkt->ref_tag_mask[1] = 0xff;
+		pkt->ref_tag_mask[2] = 0xff;
+		pkt->ref_tag_mask[3] = 0xff;
+		break;
+
+	/* For Type 3 protection: 16 bit GUARD only */
+	case SCSI_PROT_DIF_TYPE3:
+		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
+			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
+								0x00;
+		break;
+
+	/*
+	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+	 * 16 bit app tag.
+	 */
+	case SCSI_PROT_DIF_TYPE1:
+		pkt->ref_tag = cpu_to_le32((uint32_t)
+		    (0xffffffff & scsi_get_lba(cmd)));
+		pkt->app_tag = cpu_to_le16(0);
+		pkt->app_tag_mask[0] = 0x0;
+		pkt->app_tag_mask[1] = 0x0;
+
+		if (!qla2x00_hba_err_chk_enabled(sp))
+			break;
+
+		/* enable ALL bytes of the ref tag */
+		pkt->ref_tag_mask[0] = 0xff;
+		pkt->ref_tag_mask[1] = 0xff;
+		pkt->ref_tag_mask[2] = 0xff;
+		pkt->ref_tag_mask[3] = 0xff;
+		break;
+	}
+}
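+
+/*
+ * Example: a Type 1 protected command at LBA 0x100000010 seeds the ref
+ * tag with the low 32 bits, 0x00000010. With HBA error checking
+ * enabled, the all-0xff ref_tag_mask makes the firmware validate every
+ * ref-tag byte on each block, while the zeroed app_tag_mask tells it
+ * to ignore the application tag entirely.
+ */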
+
+int
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
+	uint32_t *partial)
+{
+	struct scatterlist *sg;
+	uint32_t cumulative_partial, sg_len;
+	dma_addr_t sg_dma_addr;
+
+	if (sgx->num_bytes == sgx->tot_bytes)
+		return 0;
+
+	sg = sgx->cur_sg;
+	cumulative_partial = sgx->tot_partial;
+
+	sg_dma_addr = sg_dma_address(sg);
+	sg_len = sg_dma_len(sg);
+
+	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
+
+	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
+		sgx->dma_len = (blk_sz - cumulative_partial);
+		sgx->tot_partial = 0;
+		sgx->num_bytes += blk_sz;
+		*partial = 0;
+	} else {
+		sgx->dma_len = sg_len - sgx->bytes_consumed;
+		sgx->tot_partial += sgx->dma_len;
+		*partial = 1;
+	}
+
+	sgx->bytes_consumed += sgx->dma_len;
+
+	if (sg_len == sgx->bytes_consumed) {
+		sg = sg_next(sg);
+		sgx->num_sg++;
+		sgx->cur_sg = sg;
+		sgx->bytes_consumed = 0;
+	}
+
+	return 1;
+}
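+
+/*
+ * Worked example of the block carver above: with blk_sz == 512 and an
+ * sg list of two elements of 768 and 256 bytes, the first call emits a
+ * full 512-byte block from element 0 (*partial == 0); the second emits
+ * the trailing 256 bytes of element 0 as a partial (*partial == 1) and
+ * advances cur_sg; the third tops the partial up with the 256 bytes of
+ * element 1 (256 + 256 == blk_sz, so *partial == 0); and the fourth
+ * call returns 0 because all 1024 bytes have been consumed.
+ */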
+
+int
+qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
+	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+{
+	void *next_dsd;
+	uint8_t avail_dsds = 0;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct scatterlist *sg_prot;
+	uint32_t *cur_dsd = dsd;
+	uint16_t	used_dsds = tot_dsds;
+	uint32_t	prot_int; /* protection interval */
+	uint32_t	partial;
+	struct qla2_sgx sgx;
+	dma_addr_t	sle_dma;
+	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
+	struct scsi_cmnd *cmd;
+
+	memset(&sgx, 0, sizeof(struct qla2_sgx));
+	if (sp) {
+		cmd = GET_CMD_SP(sp);
+		prot_int = cmd->device->sector_size;
+
+		sgx.tot_bytes = scsi_bufflen(cmd);
+		sgx.cur_sg = scsi_sglist(cmd);
+		sgx.sp = sp;
+
+		sg_prot = scsi_prot_sglist(cmd);
+	} else if (tc) {
+		prot_int      = tc->blk_sz;
+		sgx.tot_bytes = tc->bufflen;
+		sgx.cur_sg    = tc->sg;
+		sg_prot	      = tc->prot_sg;
+	} else {
+		BUG();
+		return 1;
+	}
+
+	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
+
+		sle_dma = sgx.dma_addr;
+		sle_dma_len = sgx.dma_len;
+alloc_and_fill:
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+					QLA_DSDS_PER_IOCB : used_dsds;
+			dsd_list_len = (avail_dsds + 1) * 12;
+			used_dsds -= avail_dsds;
+
+			/* allocate tracking DS */
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr)
+				return 1;
+
+			/* allocate new list */
+			dsd_ptr->dsd_addr = next_dsd =
+			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+				&dsd_ptr->dsd_list_dma);
+
+			if (!next_dsd) {
+				/*
+				 * Need to clean up only this dsd_ptr; the rest
+				 * will be done by sp_free_dma()
+				 */
+				kfree(dsd_ptr);
+				return 1;
+			}
+
+			if (sp) {
+				list_add_tail(&dsd_ptr->list,
+				    &((struct crc_context *)
+					    sp->u.scmd.ctx)->dsd_list);
+
+				sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			} else {
+				list_add_tail(&dsd_ptr->list,
+				    &(tc->ctx->dsd_list));
+				*tc->ctx_dsd_alloced = 1;
+			}
+
+
+			/* add new list to cmd iocb or last list */
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = dsd_list_len;
+			cur_dsd = (uint32_t *)next_dsd;
+		}
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sle_dma_len);
+		avail_dsds--;
+
+		if (partial == 0) {
+			/* Got a full protection interval */
+			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
+			sle_dma_len = 8;
+
+			tot_prot_dma_len += sle_dma_len;
+			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
+				tot_prot_dma_len = 0;
+				sg_prot = sg_next(sg_prot);
+			}
+
+			partial = 1; /* So as to not re-enter this block */
+			goto alloc_and_fill;
+		}
+	}
+	/* Null termination */
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	return 0;
+}
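+
+/*
+ * Layout note for the walkers in this file: a DSD is 12 bytes (address
+ * low/high plus length), so dsd_list_len == (avail_dsds + 1) * 12
+ * sizes one pool block holding avail_dsds data descriptors plus a
+ * final slot used either to chain to the next list or for the null
+ * terminator written above; the fixed 8-byte sle_dma_len interleaved
+ * per protection interval matches the size of one T10 DIF tuple.
+ */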
+
+int
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
+	uint16_t tot_dsds, struct qla_tc_param *tc)
+{
+	void *next_dsd;
+	uint8_t avail_dsds = 0;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct scatterlist *sg, *sgl;
+	uint32_t *cur_dsd = dsd;
+	int	i;
+	uint16_t	used_dsds = tot_dsds;
+	struct scsi_cmnd *cmd;
+
+	if (sp) {
+		cmd = GET_CMD_SP(sp);
+		sgl = scsi_sglist(cmd);
+	} else if (tc) {
+		sgl = tc->sg;
+	} else {
+		BUG();
+		return 1;
+	}
+
+
+	for_each_sg(sgl, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+					QLA_DSDS_PER_IOCB : used_dsds;
+			dsd_list_len = (avail_dsds + 1) * 12;
+			used_dsds -= avail_dsds;
+
+			/* allocate tracking DS */
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr)
+				return 1;
+
+			/* allocate new list */
+			dsd_ptr->dsd_addr = next_dsd =
+			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+				&dsd_ptr->dsd_list_dma);
+
+			if (!next_dsd) {
+				/*
+				 * Need to clean up only this dsd_ptr; the rest
+				 * will be done by sp_free_dma()
+				 */
+				kfree(dsd_ptr);
+				return 1;
+			}
+
+			if (sp) {
+				list_add_tail(&dsd_ptr->list,
+				    &((struct crc_context *)
+					    sp->u.scmd.ctx)->dsd_list);
+
+				sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			} else {
+				list_add_tail(&dsd_ptr->list,
+				    &(tc->ctx->dsd_list));
+				*tc->ctx_dsd_alloced = 1;
+			}
+
+			/* add new list to cmd iocb or last list */
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = dsd_list_len;
+			cur_dsd = (uint32_t *)next_dsd;
+		}
+		sle_dma = sg_dma_address(sg);
+
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+
+	}
+	/* Null termination */
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	return 0;
+}
+
+int
+qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
+	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+{
+	void *next_dsd;
+	uint8_t avail_dsds = 0;
+	uint32_t dsd_list_len;
+	struct dsd_dma *dsd_ptr;
+	struct scatterlist *sg, *sgl;
+	int	i;
+	struct scsi_cmnd *cmd;
+	uint32_t *cur_dsd = dsd;
+	uint16_t used_dsds = tot_dsds;
+	struct scsi_qla_host *vha;
+
+	if (sp) {
+		cmd = GET_CMD_SP(sp);
+		sgl = scsi_prot_sglist(cmd);
+		vha = sp->vha;
+	} else if (tc) {
+		vha = tc->vha;
+		sgl = tc->prot_sg;
+	} else {
+		BUG();
+		return 1;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe021,
+		"%s: enter\n", __func__);
+
+	for_each_sg(sgl, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+						QLA_DSDS_PER_IOCB : used_dsds;
+			dsd_list_len = (avail_dsds + 1) * 12;
+			used_dsds -= avail_dsds;
+
+			/* allocate tracking DS */
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr)
+				return 1;
+
+			/* allocate new list */
+			dsd_ptr->dsd_addr = next_dsd =
+			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+				&dsd_ptr->dsd_list_dma);
+
+			if (!next_dsd) {
+				/*
+				 * Need to clean up only this dsd_ptr; the rest
+				 * will be done by sp_free_dma()
+				 */
+				kfree(dsd_ptr);
+				return 1;
+			}
+
+			if (sp) {
+				list_add_tail(&dsd_ptr->list,
+				    &((struct crc_context *)
+					    sp->u.scmd.ctx)->dsd_list);
+
+				sp->flags |= SRB_CRC_CTX_DSD_VALID;
+			} else {
+				list_add_tail(&dsd_ptr->list,
+				    &(tc->ctx->dsd_list));
+				*tc->ctx_dsd_alloced = 1;
+			}
+
+			/* add new list to cmd iocb or last list */
+			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+			*cur_dsd++ = dsd_list_len;
+			cur_dsd = (uint32_t *)next_dsd;
+		}
+		sle_dma = sg_dma_address(sg);
+
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+
+		avail_dsds--;
+	}
+	/* Null termination */
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	*cur_dsd++ = 0;
+	return 0;
+}
+
+/**
+ * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
+ *							Type CRC_2 IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command type CRC_2 IOCB
+ * @tot_dsds: Total number of data segments to transfer
+ * @tot_prot_dsds: Total number of protection segments to transfer
+ * @fw_prot_opts: Protection options to pass to the firmware
+ */
+inline int
+qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
+{
+	uint32_t		*cur_dsd, *fcp_dl;
+	scsi_qla_host_t		*vha;
+	struct scsi_cmnd	*cmd;
+	uint32_t		total_bytes = 0;
+	uint32_t		data_bytes;
+	uint32_t		dif_bytes;
+	uint8_t			bundling = 1;
+	uint16_t		blk_size;
+	uint8_t			*clr_ptr;
+	struct crc_context	*crc_ctx_pkt = NULL;
+	struct qla_hw_data	*ha;
+	uint8_t			additional_fcpcdb_len;
+	uint16_t		fcp_cmnd_len;
+	struct fcp_cmnd		*fcp_cmnd;
+	dma_addr_t		crc_ctx_dma;
+
+	cmd = GET_CMD_SP(sp);
+
+	/* Update entry type to indicate Command Type CRC_2 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
+
+	vha = sp->vha;
+	ha = vha->hw;
+
+	/* No data transfer */
+	data_bytes = scsi_bufflen(cmd);
+	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		return QLA_SUCCESS;
+	}
+
+	cmd_pkt->vp_index = sp->vha->vp_idx;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->control_flags =
+		    cpu_to_le16(CF_WRITE_DATA);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->control_flags =
+		    cpu_to_le16(CF_READ_DATA);
+	}
+
+	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
+	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
+	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
+		bundling = 0;
+
+	/* Allocate CRC context from global pool */
+	crc_ctx_pkt = sp->u.scmd.ctx =
+	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+
+	if (!crc_ctx_pkt)
+		goto crc_queuing_error;
+
+	/* Zero out CTX area. */
+	clr_ptr = (uint8_t *)crc_ctx_pkt;
+	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+
+	sp->flags |= SRB_CRC_CTX_DMA_VALID;
+
+	/* Set handle */
+	crc_ctx_pkt->handle = cmd_pkt->handle;
+
+	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
+	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
+
+	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+	/* Determine SCSI command length -- align to 4 byte boundary */
+	if (cmd->cmd_len > 16) {
+		additional_fcpcdb_len = cmd->cmd_len - 16;
+		if ((cmd->cmd_len % 4) != 0) {
+			/* SCSI cmd > 16 bytes must be multiple of 4 */
+			goto crc_queuing_error;
+		}
+		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+	} else {
+		additional_fcpcdb_len = 0;
+		fcp_cmnd_len = 12 + 16 + 4;
+	}
+
+	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
+
+	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		fcp_cmnd->additional_cdb_len |= 1;
+	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		fcp_cmnd->additional_cdb_len |= 2;
+
+	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
+	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
+	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
+	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
+	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+	fcp_cmnd->task_management = 0;
+	fcp_cmnd->task_attribute = TSK_SIMPLE;
+
+	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+
+	/* Compute dif len and adjust data len to include protection */
+	dif_bytes = 0;
+	blk_size = cmd->device->sector_size;
+	dif_bytes = (data_bytes / blk_size) * 8;
+
+	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+	    total_bytes = data_bytes;
+	    data_bytes += dif_bytes;
+	    break;
+
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+	    total_bytes = data_bytes + dif_bytes;
+	    break;
+	default:
+	    BUG();
+	}
+
+	if (!qla2x00_hba_err_chk_enabled(sp))
+		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+	/* HBA error checking enabled */
+	else if (IS_PI_UNINIT_CAPABLE(ha)) {
+		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
+		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+			SCSI_PROT_DIF_TYPE2))
+			fw_prot_opts |= BIT_10;
+		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
+		    SCSI_PROT_DIF_TYPE3)
+			fw_prot_opts |= BIT_11;
+	}
+
+	if (!bundling) {
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+	} else {
+		/*
+		 * Configure bundling so the data and protection
+		 * segments are fetched with interleaving PCI accesses.
+		 */
+		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
+							tot_prot_dsds);
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+	}
+
+	/* Finish the common fields of CRC pkt */
+	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
+	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
+	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
+	/* Fibre channel byte count */
+	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
+	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+	    additional_fcpcdb_len);
+	*fcp_dl = htonl(total_bytes);
+
+	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		return QLA_SUCCESS;
+	}
+	/* Walks data segments */
+
+	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+
+	if (!bundling && tot_prot_dsds) {
+		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
+			cur_dsd, tot_dsds, NULL))
+			goto crc_queuing_error;
+	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+			(tot_dsds - tot_prot_dsds), NULL))
+		goto crc_queuing_error;
+
+	if (bundling && tot_prot_dsds) {
+		/* Walks dif segments */
+		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
+				tot_prot_dsds, NULL))
+			goto crc_queuing_error;
+	}
+	return QLA_SUCCESS;
+
+crc_queuing_error:
+	/* Cleanup will be performed by the caller */
+
+	return QLA_FUNCTION_FAILED;
+}
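+
+/*
+ * Worked example of the byte accounting above, assuming 512-byte
+ * sectors and an 8 KB WRITE: dif_bytes = (8192 / 512) * 8 = 128. For
+ * SCSI_PROT_WRITE_PASS the wire carries data plus DIF, so total_bytes
+ * (and the fcp_dl value patched in after the CDB) is 8320, while for
+ * SCSI_PROT_WRITE_STRIP the wire carries only the 8192 data bytes and
+ * the HBA drops the DIF tuples before transmit.
+ */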
+
+/**
+ * qla24xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_start_scsi(srb_t *sp)
+{
+	int		nseg;
+	unsigned long   flags;
+	uint32_t	*clr_ptr;
+	uint32_t        index;
+	uint32_t	handle;
+	struct cmd_type_7 *cmd_pkt;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Setup device pointers. */
+	req = vha->req;
+	rsp = req->rsp;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	/* Zero out remaining portion of packet. */
+	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->vha->vp_idx;
+
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	cmd_pkt->task = TSK_SIMPLE;
+
+	/* Load SCSI command packet. */
+	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+	/* Build IOCB segments */
+	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
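+
+/*
+ * Ring-space sketch for the req->cnt refresh above: on a 2048-entry
+ * ring with ring_index == 2000 and a hardware out pointer (cnt) of
+ * 100, ring_index >= cnt, so free space is length - (ring_index - cnt)
+ * == 2048 - 1900 == 148 entries; requiring req_cnt + 2 free entries
+ * leaves head room so the in pointer never lands on the out pointer
+ * and makes a full ring look empty.
+ */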
+
+/**
+ * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_dif_start_scsi(srb_t *sp)
+{
+	int			nseg;
+	unsigned long		flags;
+	uint32_t		*clr_ptr;
+	uint32_t		index;
+	uint32_t		handle;
+	uint16_t		cnt;
+	uint16_t		req_cnt = 0;
+	uint16_t		tot_dsds;
+	uint16_t		tot_prot_dsds;
+	uint16_t		fw_prot_opts = 0;
+	struct req_que		*req = NULL;
+	struct rsp_que		*rsp = NULL;
+	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host	*vha = sp->vha;
+	struct qla_hw_data	*ha = vha->hw;
+	struct cmd_type_crc_2	*cmd_pkt;
+	uint32_t		status = 0;
+
+#define QDSS_GOT_Q_SPACE	BIT_0
+
+	/* Only process protection commands or CDBs longer than 16 bytes here */
+	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+		if (cmd->cmd_len <= 16)
+			return qla24xx_start_scsi(sp);
+	}
+
+	/* Setup device pointers. */
+	req = vha->req;
+	rsp = req->rsp;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Compute number of required data segments */
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+		else
+			sp->flags |= SRB_DMA_VALID;
+
+		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+			struct qla2_sgx sgx;
+			uint32_t	partial;
+
+			memset(&sgx, 0, sizeof(struct qla2_sgx));
+			sgx.tot_bytes = scsi_bufflen(cmd);
+			sgx.cur_sg = scsi_sglist(cmd);
+			sgx.sp = sp;
+
+			nseg = 0;
+			while (qla24xx_get_one_block_sg(
+			    cmd->device->sector_size, &sgx, &partial))
+				nseg++;
+		}
+	} else
+		nseg = 0;
+
+	/* number of required data segments */
+	tot_dsds = nseg;
+
+	/* Compute number of required protection segments */
+	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+		else
+			sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+		}
+	} else {
+		nseg = 0;
+	}
+
+	req_cnt = 1;
+	/* Total Data and protection sg segment(s) */
+	tot_prot_dsds = nseg;
+	tot_dsds += nseg;
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	status |= QDSS_GOT_Q_SPACE;
+
+	/* Build header part of command packet (excluding the OPCODE). */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	/* Fill-in common area */
+	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	/* Total Data and protection segment(s) */
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Build IOCB segments and adjust for data protection segments */
+	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+		QLA_SUCCESS)
+		goto queuing_error;
+
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	/* Specify response queue number where completion should happen */
+	cmd_pkt->entry_status = (uint8_t) rsp->id;
+	cmd_pkt->timeout = cpu_to_le16(0);
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (status & QDSS_GOT_Q_SPACE) {
+		req->outstanding_cmds[handle] = NULL;
+		req->cnt += req_cnt;
+	}
+	/* Cleanup will be performed by the caller (queuecommand) */
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_FUNCTION_FAILED;
+}
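+
+/*
+ * Example of the DSD recount above for SCSI_PROT_WRITE_STRIP with
+ * 512-byte sectors and a 4 KB buffer: dma_map_sg() may return a single
+ * coalesced segment, but the firmware needs one data DSD per
+ * protection interval, so the block walker recounts nseg as 8 and the
+ * protection-side count likewise becomes scsi_bufflen / sector_size.
+ */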
+
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
+{
+	int		nseg;
+	unsigned long   flags;
+	uint32_t	*clr_ptr;
+	uint32_t        index;
+	uint32_t	handle;
+	struct cmd_type_7 *cmd_pkt;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair = sp->qpair;
+
+	/* Acquire qpair specific lock */
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+
+	/* Setup qpair pointers */
+	rsp = qpair->rsp;
+	req = qpair->req;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS) {
+			spin_unlock_irqrestore(&qpair->qp_lock, flags);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	/* Zero out remaining portion of packet. */
+	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	cmd_pkt->task = TSK_SIMPLE;
+
+	/* Load SCSI command packet. */
+	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+	/* Build IOCB segments */
+	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
+
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+	int			nseg;
+	unsigned long		flags;
+	uint32_t		*clr_ptr;
+	uint32_t		index;
+	uint32_t		handle;
+	uint16_t		cnt;
+	uint16_t		req_cnt = 0;
+	uint16_t		tot_dsds;
+	uint16_t		tot_prot_dsds;
+	uint16_t		fw_prot_opts = 0;
+	struct req_que		*req = NULL;
+	struct rsp_que		*rsp = NULL;
+	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host	*vha = sp->fcport->vha;
+	struct qla_hw_data	*ha = vha->hw;
+	struct cmd_type_crc_2	*cmd_pkt;
+	uint32_t		status = 0;
+	struct qla_qpair	*qpair = sp->qpair;
+
+#define QDSS_GOT_Q_SPACE	BIT_0
+
+	/* Check for host side state */
+	if (!qpair->online) {
+		cmd->result = DID_NO_CONNECT << 16;
+		return QLA_INTERFACE_ERROR;
+	}
+
+	if (!qpair->difdix_supported &&
+		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+		cmd->result = DID_NO_CONNECT << 16;
+		return QLA_INTERFACE_ERROR;
+	}
+
+	/* Only process protection commands or CDBs longer than 16 bytes here */
+	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+		if (cmd->cmd_len <= 16)
+			return qla2xxx_start_scsi_mq(sp);
+	}
+
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+
+	/* Setup qpair pointers */
+	rsp = qpair->rsp;
+	req = qpair->req;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS) {
+			spin_unlock_irqrestore(&qpair->qp_lock, flags);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Compute number of required data segments */
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+		else
+			sp->flags |= SRB_DMA_VALID;
+
+		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+			struct qla2_sgx sgx;
+			uint32_t	partial;
+
+			memset(&sgx, 0, sizeof(struct qla2_sgx));
+			sgx.tot_bytes = scsi_bufflen(cmd);
+			sgx.cur_sg = scsi_sglist(cmd);
+			sgx.sp = sp;
+
+			nseg = 0;
+			while (qla24xx_get_one_block_sg(
+			    cmd->device->sector_size, &sgx, &partial))
+				nseg++;
+		}
+	} else
+		nseg = 0;
+
+	/* number of required data segments */
+	tot_dsds = nseg;
+
+	/* Compute number of required protection segments */
+	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+		else
+			sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+		}
+	} else {
+		nseg = 0;
+	}
+
+	req_cnt = 1;
+	/* Total Data and protection sg segment(s) */
+	tot_prot_dsds = nseg;
+	tot_dsds += nseg;
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	status |= QDSS_GOT_Q_SPACE;
+
+	/* Build header part of command packet (excluding the OPCODE). */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	/* Fill-in common area */
+	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	/* Total Data and protection segment(s) */
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Build IOCB segments and adjust for data protection segments */
+	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+		QLA_SUCCESS)
+		goto queuing_error;
+
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	cmd_pkt->timeout = cpu_to_le16(0);
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (status & QDSS_GOT_Q_SPACE) {
+		req->outstanding_cmds[handle] = NULL;
+		req->cnt += req_cnt;
+	}
+	/* Cleanup will be performed by the caller (queuecommand) */
+
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+	return QLA_FUNCTION_FAILED;
+}
+
+/* Generic Control-SRB manipulation functions. */
+
+/* hardware_lock assumed to be held. */
+
+void *
+__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
+{
+	scsi_qla_host_t *vha = qpair->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = qpair->req;
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
+	uint32_t index, handle;
+	request_t *pkt;
+	uint16_t cnt, req_cnt;
+
+	pkt = NULL;
+	req_cnt = 1;
+	handle = 0;
+
+	if (sp && (sp->type != SRB_SCSI_CMD)) {
+		/* Adjust entry-counts as needed. */
+		req_cnt = sp->iocbs;
+	}
+
+	/* Check for room on request queue. */
+	if (req->cnt < req_cnt + 2) {
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+		else if (IS_P3P_TYPE(ha))
+			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+		else if (IS_FWI2_CAPABLE(ha))
+			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+		else if (IS_QLAFX00(ha))
+			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+		else
+			cnt = qla2x00_debounce_register(
+			    ISP_REQ_Q_OUT(ha, &reg->isp));
+
+		if  (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+	}
+	if (req->cnt < req_cnt + 2)
+		goto queuing_error;
+
+	if (sp) {
+		/* Check for room in outstanding command list. */
+		handle = req->current_outstanding_cmd;
+		for (index = 1; index < req->num_outstanding_cmds; index++) {
+			handle++;
+			if (handle == req->num_outstanding_cmds)
+				handle = 1;
+			if (!req->outstanding_cmds[handle])
+				break;
+		}
+		if (index == req->num_outstanding_cmds) {
+			ql_log(ql_log_warn, vha, 0x700b,
+			    "No room on outstanding cmd array.\n");
+			goto queuing_error;
+		}
+
+		/* Prep command array. */
+		req->current_outstanding_cmd = handle;
+		req->outstanding_cmds[handle] = sp;
+		sp->handle = handle;
+	}
+
+	/* Prep packet */
+	req->cnt -= req_cnt;
+	pkt = req->ring_ptr;
+	memset(pkt, 0, REQUEST_ENTRY_SIZE);
+	if (IS_QLAFX00(ha)) {
+		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
+		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+	} else {
+		pkt->entry_count = req_cnt;
+		pkt->handle = handle;
+	}
+
+	return pkt;
+
+queuing_error:
+	qpair->tgt_counters.num_alloc_iocb_failed++;
+	return pkt;
+}
+
+void *
+qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
+{
+	scsi_qla_host_t *vha = qpair->vha;
+
+	if (qla2x00_reset_active(vha))
+		return NULL;
+
+	return __qla2x00_alloc_iocbs(qpair, sp);
+}
+
+void *
+qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
+{
+	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
+}
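+
+/*
+ * Usage note: both wrappers above return either a zeroed request-queue
+ * entry ready to be filled in or NULL when the ring or the outstanding
+ * command array is full; qla2x00_alloc_iocbs_ready() additionally
+ * bails out with NULL while qla2x00_reset_active() reports a reset in
+ * progress, so reset-sensitive callers use that variant.
+ */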
+
+static void
+qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
+		logio->control_flags |= LCF_NVME_PRLI;
+
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+	logio->port_id[1] = sp->fcport->d_id.b.area;
+	logio->port_id[2] = sp->fcport->d_id.b.domain;
+	logio->vp_index = sp->vha->vp_idx;
+}
+
+static void
+qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
+	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
+		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
+		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+	logio->port_id[1] = sp->fcport->d_id.b.area;
+	logio->port_id[2] = sp->fcport->d_id.b.domain;
+	logio->vp_index = sp->vha->vp_idx;
+}
+
+static void
+qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+	struct qla_hw_data *ha = sp->vha->hw;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	uint16_t opts;
+
+	mbx->entry_type = MBX_IOCB_TYPE;
+	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
+	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
+	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
+	if (HAS_EXTENDED_IDS(ha)) {
+		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+		mbx->mb10 = cpu_to_le16(opts);
+	} else {
+		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
+	}
+	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
+	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
+	    sp->fcport->d_id.b.al_pa);
+	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
+}
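+
+/*
+ * Packing example for the legacy mailbox login above: without extended
+ * IDs, loop_id 0x42 with the conditional-PLOGI option (BIT_0) packs
+ * into mb1 as (0x42 << 8) | 0x01 == 0x4201, while with extended IDs
+ * the loop_id travels whole in mb1 and the option bits move to mb10.
+ */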
+
+static void
+qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags =
+	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+	if (!sp->fcport->se_sess ||
+	    !sp->fcport->keep_nport_handle)
+		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+	logio->port_id[1] = sp->fcport->d_id.b.area;
+	logio->port_id[2] = sp->fcport->d_id.b.domain;
+	logio->vp_index = sp->vha->vp_idx;
+}
+
+static void
+qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+	struct qla_hw_data *ha = sp->vha->hw;
+
+	mbx->entry_type = MBX_IOCB_TYPE;
+	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
+	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
+	    cpu_to_le16(sp->fcport->loop_id):
+	    cpu_to_le16(sp->fcport->loop_id << 8);
+	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
+	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
+	    sp->fcport->d_id.b.al_pa);
+	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
+	/* Implicit: mbx->mbx10 = 0. */
+}
+
+static void
+qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->vp_index = sp->vha->vp_idx;
+}
+
+static void
+qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+	struct qla_hw_data *ha = sp->vha->hw;
+
+	mbx->entry_type = MBX_IOCB_TYPE;
+	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
+	if (HAS_EXTENDED_IDS(ha)) {
+		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+		mbx->mb10 = cpu_to_le16(BIT_0);
+	} else {
+		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
+	}
+	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
+	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
+	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
+	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
+	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
+}
+
+static void
+qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
+{
+	uint32_t flags;
+	uint64_t lun;
+	struct fc_port *fcport = sp->fcport;
+	scsi_qla_host_t *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+	struct req_que *req = vha->req;
+
+	flags = iocb->u.tmf.flags;
+	lun = iocb->u.tmf.lun;
+
+	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
+	tsk->entry_count = 1;
+	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
+	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+	tsk->control_flags = cpu_to_le32(flags);
+	tsk->port_id[0] = fcport->d_id.b.al_pa;
+	tsk->port_id[1] = fcport->d_id.b.area;
+	tsk->port_id[2] = fcport->d_id.b.domain;
+	tsk->vp_index = fcport->vha->vp_idx;
+
+	if (flags == TCF_LUN_RESET) {
+		int_to_scsilun(lun, &tsk->lun);
+		host_to_fcp_swap((uint8_t *)&tsk->lun,
+			sizeof(tsk->lun));
+	}
+}
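+
+/*
+ * Note on the task-management timeout above: ha->r_a_tov appears to be
+ * kept in tenths of a second (an assumption; the units are not stated
+ * here), so r_a_tov / 10 * 2 hands the firmware twice the resource
+ * allocation timeout in seconds -- e.g. r_a_tov == 100 (10 s) yields
+ * tsk->timeout == 20. For TCF_LUN_RESET the LUN is also byte-swapped
+ * into FCP wire format before it reaches the firmware.
+ */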
+
+static void
+qla2x00_els_dcmd_sp_free(void *data)
+{
+	srb_t *sp = data;
+	struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+	kfree(sp->fcport);
+
+	if (elsio->u.els_logo.els_logo_pyld)
+		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+		    elsio->u.els_logo.els_logo_pyld,
+		    elsio->u.els_logo.els_logo_pyld_dma);
+
+	del_timer(&elsio->timer);
+	qla2x00_rel_sp(sp);
+}
+
+static void
+qla2x00_els_dcmd_iocb_timeout(void *data)
+{
+	srb_t *sp = data;
+	fc_port_t *fcport = sp->fcport;
+	struct scsi_qla_host *vha = sp->vha;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	ql_dbg(ql_dbg_io, vha, 0x3069,
+	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
+	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    fcport->d_id.b.al_pa);
+
+	complete(&lio->u.els_logo.comp);
+}
+
+static void
+qla2x00_els_dcmd_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	fc_port_t *fcport = sp->fcport;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = sp->vha;
+
+	ql_dbg(ql_dbg_io, vha, 0x3072,
+	    "%s hdl=%x, portid=%02x%02x%02x done\n",
+	    sp->name, sp->handle, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	complete(&lio->u.els_logo.comp);
+}
+
+int
+qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
+    port_id_t remote_did)
+{
+	srb_t *sp;
+	fc_port_t *fcport = NULL;
+	struct srb_iocb *elsio = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	struct els_logo_payload logo_pyld;
+	int rval = QLA_SUCCESS;
+
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
+		return -ENOMEM;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		kfree(fcport);
+		ql_log(ql_log_info, vha, 0x70e6,
+		    "SRB allocation failed\n");
+		return -ENOMEM;
+	}
+
+	elsio = &sp->u.iocb_cmd;
+	fcport->loop_id = 0xFFFF;
+	fcport->d_id.b.domain = remote_did.b.domain;
+	fcport->d_id.b.area = remote_did.b.area;
+	fcport->d_id.b.al_pa = remote_did.b.al_pa;
+
+	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	sp->type = SRB_ELS_DCMD;
+	sp->name = "ELS_DCMD";
+	sp->fcport = fcport;
+	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
+	sp->done = qla2x00_els_dcmd_sp_done;
+	sp->free = qla2x00_els_dcmd_sp_free;
+
+	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
+			    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
+			    GFP_KERNEL);
+
+	if (!elsio->u.els_logo.els_logo_pyld) {
+		sp->free(sp);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
+
+	elsio->u.els_logo.els_cmd = els_opcode;
+	logo_pyld.opcode = els_opcode;
+	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
+	logo_pyld.s_id[1] = vha->d_id.b.area;
+	logo_pyld.s_id[2] = vha->d_id.b.domain;
+	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
+	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
+
+	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
+	    sizeof(struct els_logo_payload));
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		sp->free(sp);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_io, vha, 0x3074,
+	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
+	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	wait_for_completion(&elsio->u.els_logo.comp);
+
+	sp->free(sp);
+	return rval;
+}
+
+static void
+qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+	scsi_qla_host_t *vha = sp->vha;
+	struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+	els_iocb->entry_type = ELS_IOCB_TYPE;
+	els_iocb->entry_count = 1;
+	els_iocb->sys_define = 0;
+	els_iocb->entry_status = 0;
+	els_iocb->handle = sp->handle;
+	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	els_iocb->tx_dsd_count = 1;
+	els_iocb->vp_index = vha->vp_idx;
+	els_iocb->sof_type = EST_SOFI3;
+	els_iocb->rx_dsd_count = 0;
+	els_iocb->opcode = elsio->u.els_logo.els_cmd;
+
+	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	els_iocb->control_flags = 0;
+
+	els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+	els_iocb->tx_address[0] =
+	    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
+	els_iocb->tx_address[1] =
+	    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+	els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
+
+	els_iocb->rx_byte_count = 0;
+	els_iocb->rx_address[0] = 0;
+	els_iocb->rx_address[1] = 0;
+	els_iocb->rx_len = 0;
+
+	sp->vha->qla_stats.control_requests++;
+}
+
+static void
+qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+
+	els_iocb->entry_type = ELS_IOCB_TYPE;
+	els_iocb->entry_count = 1;
+	els_iocb->sys_define = 0;
+	els_iocb->entry_status = 0;
+	els_iocb->handle = sp->handle;
+	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	els_iocb->vp_index = sp->vha->vp_idx;
+	els_iocb->sof_type = EST_SOFI3;
+	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+
+	els_iocb->opcode =
+	    sp->type == SRB_ELS_CMD_RPT ?
+	    bsg_request->rqst_data.r_els.els_code :
+	    bsg_request->rqst_data.h_els.command_code;
+	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	els_iocb->control_flags = 0;
+	els_iocb->rx_byte_count =
+	    cpu_to_le32(bsg_job->reply_payload.payload_len);
+	els_iocb->tx_byte_count =
+	    cpu_to_le32(bsg_job->request_payload.payload_len);
+
+	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	els_iocb->tx_len = cpu_to_le32(sg_dma_len
+	    (bsg_job->request_payload.sg_list));
+
+	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->reply_payload.sg_list)));
+	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->reply_payload.sg_list)));
+	els_iocb->rx_len = cpu_to_le32(sg_dma_len
+	    (bsg_job->reply_payload.sg_list));
+
+	sp->vha->qla_stats.control_requests++;
+}
+
+static void
+qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
+{
+	uint16_t        avail_dsds;
+	uint32_t        *cur_dsd;
+	struct scatterlist *sg;
+	int index;
+	uint16_t tot_dsds;
+	scsi_qla_host_t *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	int loop_iteration = 0;
+	int entry_count = 1;
+
+	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
+	ct_iocb->entry_type = CT_IOCB_TYPE;
+	ct_iocb->entry_status = 0;
+	ct_iocb->handle1 = sp->handle;
+	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
+	ct_iocb->status = cpu_to_le16(0);
+	ct_iocb->control_flags = cpu_to_le16(0);
+	ct_iocb->timeout = 0;
+	ct_iocb->cmd_dsd_count =
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	ct_iocb->total_dsd_count =
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
+	ct_iocb->req_bytecount =
+	    cpu_to_le32(bsg_job->request_payload.payload_len);
+	ct_iocb->rsp_bytecount =
+	    cpu_to_le32(bsg_job->reply_payload.payload_len);
+
+	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->request_payload.sg_list)));
+	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
+
+	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
+	    (bsg_job->reply_payload.sg_list)));
+	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
+	    (bsg_job->reply_payload.sg_list)));
+	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
+
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
+	index = 0;
+	tot_dsds = bsg_job->reply_payload.sg_cnt;
+
+	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+		dma_addr_t       sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Cont.
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+			    vha->hw->req_q_map[0]);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		loop_iteration++;
+		avail_dsds--;
+	}
+	ct_iocb->entry_count = entry_count;
+
+	sp->vha->qla_stats.control_requests++;
+}
+
+static void
+qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
+{
+	uint16_t        avail_dsds;
+	uint32_t        *cur_dsd;
+	struct scatterlist *sg;
+	int index;
+	uint16_t cmd_dsds, rsp_dsds;
+	scsi_qla_host_t *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	int entry_count = 1;
+	cont_a64_entry_t *cont_pkt = NULL;
+
+	ct_iocb->entry_type = CT_IOCB_TYPE;
+	ct_iocb->entry_status = 0;
+	ct_iocb->sys_define = 0;
+	ct_iocb->handle = sp->handle;
+
+	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	ct_iocb->vp_index = sp->vha->vp_idx;
+	ct_iocb->comp_status = cpu_to_le16(0);
+
+	cmd_dsds = bsg_job->request_payload.sg_cnt;
+	rsp_dsds = bsg_job->reply_payload.sg_cnt;
+
+	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
+	ct_iocb->timeout = 0;
+	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
+	ct_iocb->cmd_byte_count =
+	    cpu_to_le32(bsg_job->request_payload.payload_len);
+
+	avail_dsds = 2;
+	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
+	index = 0;
+
+	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
+		dma_addr_t       sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Cont.
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(
+			    vha, ha->req_q_map[0]);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+
+	index = 0;
+
+	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
+		dma_addr_t       sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Cont.
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+			    ha->req_q_map[0]);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+	ct_iocb->entry_count = entry_count;
+}
+
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+	int		nseg;
+	unsigned long   flags;
+	struct scsi_cmnd *cmd;
+	uint32_t	*clr_ptr;
+	uint32_t        index;
+	uint32_t	handle;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct device_reg_82xx __iomem *reg;
+	uint32_t dbval;
+	uint32_t *fcp_dl;
+	uint8_t additional_cdb_len;
+	struct ct6_dsd *ctx;
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+
+	/* Setup device pointers. */
+	reg = &ha->iobase->isp82;
+	cmd = GET_CMD_SP(sp);
+	req = vha->req;
+	rsp = ha->rsp_q_map[0];
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	dbval = 0x04 | (ha->portnum << 5);
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req,
+			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x300c,
+			    "qla2x00_marker failed for cmd=%p.\n", cmd);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+
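+	/*
+	 * Commands whose DSD count exceeds ql2xshiftctondsd are sent as
+	 * Command Type 6 IOCBs (external DSD lists plus an FCP_CMND IU);
+	 * smaller commands use inline Command Type 7 IOCBs.
+	 */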
+	if (tot_dsds > ql2xshiftctondsd) {
+		struct cmd_type_6 *cmd_pkt;
+		uint16_t more_dsd_lists = 0;
+		struct dsd_dma *dsd_ptr;
+		uint16_t i;
+
+		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+			ql_dbg(ql_dbg_io, vha, 0x300d,
+			    "Num of DSD lists %d is greater than %d for cmd=%p.\n",
+			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+			    cmd);
+			goto queuing_error;
+		}
+
+		if (more_dsd_lists <= ha->gbl_dsd_avail)
+			goto sufficient_dsds;
+		else
+			more_dsd_lists -= ha->gbl_dsd_avail;
+
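+		/* Grow the global DSD list pool by the shortfall. */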
+		for (i = 0; i < more_dsd_lists; i++) {
+			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+			if (!dsd_ptr) {
+				ql_log(ql_log_fatal, vha, 0x300e,
+				    "Failed to allocate memory for dsd_dma "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+
+			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+			if (!dsd_ptr->dsd_addr) {
+				kfree(dsd_ptr);
+				ql_log(ql_log_fatal, vha, 0x300f,
+				    "Failed to allocate memory for dsd_addr "
+				    "for cmd=%p.\n", cmd);
+				goto queuing_error;
+			}
+			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+			ha->gbl_dsd_avail++;
+		}
+
+sufficient_dsds:
+		req_cnt = 1;
+
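+		/*
+		 * Recompute free request-queue slots from the hardware
+		 * out-pointer, accounting for ring wrap.
+		 */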
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+				&reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+			if (req->cnt < (req_cnt + 2))
+				goto queuing_error;
+		}
+
+		ctx = sp->u.scmd.ctx =
+		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+		if (!ctx) {
+			ql_log(ql_log_fatal, vha, 0x3010,
+			    "Failed to allocate ctx for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+
+		memset(ctx, 0, sizeof(struct ct6_dsd));
+		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+		if (!ctx->fcp_cmnd) {
+			ql_log(ql_log_fatal, vha, 0x3011,
+			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+			goto queuing_error;
+		}
+
+		/* Initialize the DSD list and dma handle */
+		INIT_LIST_HEAD(&ctx->dsd_list);
+		ctx->dsd_use_cnt = 0;
+
+		if (cmd->cmd_len > 16) {
+			additional_cdb_len = cmd->cmd_len - 16;
+			if ((cmd->cmd_len % 4) != 0) {
+				/* A SCSI command longer than 16 bytes must
+				 * be a multiple of 4.
+				 */
+				ql_log(ql_log_warn, vha, 0x3012,
+				    "scsi cmd len %d not multiple of 4 "
+				    "for cmd=%p.\n", cmd->cmd_len, cmd);
+				goto queuing_error_fcp_cmnd;
+			}
+			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+		} else {
+			additional_cdb_len = 0;
+			ctx->fcp_cmnd_len = 12 + 16 + 4;
+		}
+
+		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->vha->vp_idx;
+
+		/* Build IOCB segments */
+		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+			goto queuing_error_fcp_cmnd;
+
+		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+		/* build FCP_CMND IU */
+		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 1;
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+		/* Populate the FCP_PRIO. */
+		if (ha->flags.fcp_prio_enabled)
+			ctx->fcp_cmnd->task_attribute |=
+			    sp->fcport->fcp_prio << 3;
+
+		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
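+		/* FCP_DL (transfer length) follows the CDB in the FCP_CMND IU. */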
+		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+		    additional_cdb_len);
+		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+		cmd_pkt->fcp_cmnd_dseg_address[0] =
+		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+		cmd_pkt->fcp_cmnd_dseg_address[1] =
+		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+		sp->flags |= SRB_FCP_CMND_DMA_VALID;
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+	} else {
+		struct cmd_type_7 *cmd_pkt;
+		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+			    &reg->req_q_out[0]);
+			if (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+					(req->ring_index - cnt);
+		}
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+
+		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+		/* Zero out remaining portion of packet. */
+		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+		clr_ptr = (uint32_t *)cmd_pkt + 2;
+		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+		/* Set NPORT-ID and LUN number*/
+		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+		cmd_pkt->vp_index = sp->vha->vp_idx;
+
+		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+		    sizeof(cmd_pkt->lun));
+
+		/* Populate the FCP_PRIO. */
+		if (ha->flags.fcp_prio_enabled)
+			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+		/* Load SCSI command packet. */
+		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+		/* Build IOCB segments */
+		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+		/* Set total data segment count. */
+		cmd_pkt->entry_count = (uint8_t)req_cnt;
+		/* Specify response queue number where
+		 * completion should happen.
+		 */
+		cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+	}
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index: write the doorbell, then read back and
+	 * rewrite until the chip reflects the new value. */
+	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+	if (ql2xdbwr)
+		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
+	else {
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+		wmb();
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+			wmb();
+		}
+	}
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	if (sp->u.scmd.ctx) {
+		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
+		sp->u.scmd.ctx = NULL;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+	struct srb_iocb *aio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->vha;
+	struct req_que *req = vha->req;
+
+	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+	abt_iocb->entry_type = ABORT_IOCB_TYPE;
+	abt_iocb->entry_count = 1;
+	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	abt_iocb->handle_to_abort =
+	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	abt_iocb->vp_index = vha->vp_idx;
+	abt_iocb->req_que_no = cpu_to_le16(req->id);
+	/* Send the command to the firmware */
+	wmb();
+}
+
+static void
+qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
+{
+	int i, sz;
+
+	mbx->entry_type = MBX_IOCB_TYPE;
+	mbx->handle = sp->handle;
+	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
+
+	for (i = 0; i < sz; i++)
+		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+}
+
+static void
+qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
+{
+	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
+	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
+	ct_pkt->handle = sp->handle;
+}
+
+static void qla2x00_send_notify_ack_iocb(srb_t *sp,
+	struct nack_to_isp *nack)
+{
+	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
+
+	nack->entry_type = NOTIFY_ACK_TYPE;
+	nack->entry_count = 1;
+	nack->ox_id = ntfy->ox_id;
+
+	nack->u.isp24.handle = sp->handle;
+	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+		nack->u.isp24.flags = ntfy->u.isp24.flags &
+			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+	}
+	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+	nack->u.isp24.status = ntfy->u.isp24.status;
+	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+	nack->u.isp24.srr_flags = 0;
+	nack->u.isp24.srr_reject_code = 0;
+	nack->u.isp24.srr_reject_code_expl = 0;
+	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+}
+
+/*
+ * Build NVME LS request
+ */
+static int
+qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
+{
+	struct srb_iocb *nvme;
+	int     rval = QLA_SUCCESS;
+
+	nvme = &sp->u.iocb_cmd;
+	cmd_pkt->entry_type = PT_LS4_REQUEST;
+	cmd_pkt->entry_count = 1;
+	cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+
+	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
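+	/*
+	 * A single TX segment carries the LS request payload; a single RX
+	 * segment receives the response.
+	 */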
+	cmd_pkt->tx_dseg_count = 1;
+	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
+	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
+	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+
+	cmd_pkt->rx_dseg_count = 1;
+	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+	cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
+	cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
+	cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+
+	return rval;
+}
+
+int
+qla2x00_start_sp(srb_t *sp)
+{
+	int rval;
+	scsi_qla_host_t *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	void *pkt;
+	unsigned long flags;
+
+	rval = QLA_FUNCTION_FAILED;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	pkt = qla2x00_alloc_iocbs(vha, sp);
+	if (!pkt) {
+		ql_log(ql_log_warn, vha, 0x700c,
+		    "qla2x00_alloc_iocbs failed.\n");
+		goto done;
+	}
+
+	rval = QLA_SUCCESS;
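+	/* Dispatch to the IOCB builder for this SRB type. */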
+	switch (sp->type) {
+	case SRB_LOGIN_CMD:
+		IS_FWI2_CAPABLE(ha) ?
+		    qla24xx_login_iocb(sp, pkt) :
+		    qla2x00_login_iocb(sp, pkt);
+		break;
+	case SRB_PRLI_CMD:
+		qla24xx_prli_iocb(sp, pkt);
+		break;
+	case SRB_LOGOUT_CMD:
+		IS_FWI2_CAPABLE(ha) ?
+		    qla24xx_logout_iocb(sp, pkt) :
+		    qla2x00_logout_iocb(sp, pkt);
+		break;
+	case SRB_ELS_CMD_RPT:
+	case SRB_ELS_CMD_HST:
+		qla24xx_els_iocb(sp, pkt);
+		break;
+	case SRB_CT_CMD:
+		IS_FWI2_CAPABLE(ha) ?
+		    qla24xx_ct_iocb(sp, pkt) :
+		    qla2x00_ct_iocb(sp, pkt);
+		break;
+	case SRB_ADISC_CMD:
+		IS_FWI2_CAPABLE(ha) ?
+		    qla24xx_adisc_iocb(sp, pkt) :
+		    qla2x00_adisc_iocb(sp, pkt);
+		break;
+	case SRB_TM_CMD:
+		IS_QLAFX00(ha) ?
+		    qlafx00_tm_iocb(sp, pkt) :
+		    qla24xx_tm_iocb(sp, pkt);
+		break;
+	case SRB_FXIOCB_DCMD:
+	case SRB_FXIOCB_BCMD:
+		qlafx00_fxdisc_iocb(sp, pkt);
+		break;
+	case SRB_NVME_LS:
+		qla_nvme_ls(sp, pkt);
+		break;
+	case SRB_ABT_CMD:
+		IS_QLAFX00(ha) ?
+			qlafx00_abort_iocb(sp, pkt) :
+			qla24xx_abort_iocb(sp, pkt);
+		break;
+	case SRB_ELS_DCMD:
+		qla24xx_els_logo_iocb(sp, pkt);
+		break;
+	case SRB_CT_PTHRU_CMD:
+		qla2x00_ctpthru_cmd_iocb(sp, pkt);
+		break;
+	case SRB_MB_IOCB:
+		qla2x00_mb_iocb(sp, pkt);
+		break;
+	case SRB_NACK_PLOGI:
+	case SRB_NACK_PRLI:
+	case SRB_NACK_LOGO:
+		qla2x00_send_notify_ack_iocb(sp, pkt);
+		break;
+	default:
+		break;
+	}
+
+	wmb();
+	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+done:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return rval;
+}
+
+static void
+qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
+				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
+{
+	uint16_t avail_dsds;
+	uint32_t *cur_dsd;
+	uint32_t req_data_len = 0;
+	uint32_t rsp_data_len = 0;
+	struct scatterlist *sg;
+	int index;
+	int entry_count = 1;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+
+	/* Update entry type to indicate bidir command */
+	*((uint32_t *)(&cmd_pkt->entry_type)) =
+		cpu_to_le32(COMMAND_BIDIRECTIONAL);
+
+	/* Set the transfer direction; in this case set both flags.
+	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
+	 * assigning DID=SID for outgoing pkts.
+	 */
+	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+							BD_WRAP_BACK);
+
+	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
+	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
+	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+
+	vha->bidi_stats.transfer_bytes += req_data_len;
+	vha->bidi_stats.io_count++;
+
+	vha->qla_stats.output_bytes += req_data_len;
+	vha->qla_stats.output_requests++;
+
+	/* Only one DSD is available in the bidirectional IOCB; the
+	 * remaining DSDs are bundled in continuation IOCBs.
+	 */
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+
+	index = 0;
+
+	for_each_sg(bsg_job->request_payload.sg_list, sg,
+				bsg_job->request_payload.sg_cnt, index) {
+		dma_addr_t sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets */
+		if (avail_dsds == 0) {
+			/* Continuation type 1 IOCB can accommodate
+			 * 5 DSDs
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+	/* For a read request the DSDs always go to a continuation IOCB,
+	 * following the write DSDs. If there is room in the current IOCB
+	 * they are added there; otherwise a new continuation IOCB is
+	 * allocated.
+	 */
+	for_each_sg(bsg_job->reply_payload.sg_list, sg,
+				bsg_job->reply_payload.sg_cnt, index) {
+		dma_addr_t sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets */
+		if (avail_dsds == 0) {
+			/* Continuation type 1 IOCB can accommodate
+			 * 5 DSDs
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+			entry_count++;
+		}
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+	/* This value should equal the number of IOCBs required for this cmd */
+	cmd_pkt->entry_count = entry_count;
+}
+
+int
+qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
+{
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	uint32_t handle;
+	uint32_t index;
+	uint16_t req_cnt;
+	uint16_t cnt;
+	uint32_t *clr_ptr;
+	struct cmd_bidir *cmd_pkt = NULL;
+	struct rsp_que *rsp;
+	struct req_que *req;
+	int rval = EXT_STATUS_OK;
+
+	rsp = ha->rsp_q_map[0];
+	req = vha->req;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req,
+			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+			return EXT_STATUS_MAILBOX;
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+
+	if (index == req->num_outstanding_cmds) {
+		rval = EXT_STATUS_BUSY;
+		goto queuing_error;
+	}
+
+	/* Calculate number of IOCB required */
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+	/* Check for room on request queue. */
+	if (req->cnt < req_cnt + 2) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if  (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+	}
+	if (req->cnt < req_cnt + 2) {
+		rval = EXT_STATUS_BUSY;
+		goto queuing_error;
+	}
+
+	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	/* Zero out remaining portion of packet. */
+	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	/* Set NPORT-ID  (of vha)*/
+	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
+	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = vha->d_id.b.area;
+	cmd_pkt->port_id[2] = vha->d_id.b.domain;
+
+	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
+	cmd_pkt->entry_status = (uint8_t) rsp->id;
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	req->cnt -= req_cnt;
+
+	/* Send the command to the firmware */
+	wmb();
+	qla2x00_start_iocbs(vha, req);
+queuing_error:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return rval;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_isr.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_isr.c
new file mode 100644
index 0000000..b39faf2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_isr.c
@@ -0,0 +1,3637 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/t10-pi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/fc/fc_fs.h>
+#include <linux/nvme-fc-driver.h>
+
+static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
+static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
+static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
+static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+	sts_entry_t *);
+
+/**
+ * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla2100_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct device_reg_2xxx __iomem *reg;
+	int		status;
+	unsigned long	iter;
+	uint16_t	hccr;
+	uint16_t	mb[4];
+	struct rsp_que *rsp;
+	unsigned long	flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x505d,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return (IRQ_NONE);
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->isp;
+	status = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	for (iter = 50; iter--; ) {
+		hccr = RD_REG_WORD(&reg->hccr);
+		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
+			break;
+		if (hccr & HCCR_RISC_PAUSE) {
+			if (pci_channel_offline(ha->pdev))
+				break;
+
+			/*
+			 * Issue a "HARD" reset in order for the RISC interrupt
+			 * bit to be cleared.  Schedule a big hammer to get
+			 * out of the RISC PAUSED state.
+			 */
+			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+			RD_REG_WORD(&reg->hccr);
+
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			break;
+		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
+			break;
+
+		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+			RD_REG_WORD(&reg->hccr);
+
+			/* Get mailbox data. */
+			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
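+			/*
+			 * 0x4000-0x7fff: mailbox command completion status;
+			 * 0x8000-0xbfff: asynchronous event.
+			 */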
+			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
+				qla2x00_mbx_completion(vha, mb[0]);
+				status |= MBX_INTERRUPT;
+			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
+				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
+				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
+				qla2x00_async_event(vha, rsp, mb);
+			} else {
+				/*EMPTY*/
+				ql_dbg(ql_dbg_async, vha, 0x5025,
+				    "Unrecognized interrupt type (%d).\n",
+				    mb[0]);
+			}
+			/* Release mailbox registers. */
+			WRT_REG_WORD(&reg->semaphore, 0);
+			RD_REG_WORD(&reg->semaphore);
+		} else {
+			qla2x00_process_response_queue(rsp);
+
+			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+			RD_REG_WORD(&reg->hccr);
+		}
+	}
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return (IRQ_HANDLED);
+}
+
+bool
+qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+{
+	/* Check for PCI disconnection */
+	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
+		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
+		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
+		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
+			/*
+			 * Schedule this (only once) on the default system
+			 * workqueue so that all the adapter workqueues and the
+			 * DPC thread can be shutdown cleanly.
+			 */
+			schedule_work(&vha->hw->board_disable);
+		}
+		return true;
+	} else
+		return false;
+}
+
+bool
+qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
+{
+	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
+}
+
+/**
+ * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla2300_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct device_reg_2xxx __iomem *reg;
+	int		status;
+	unsigned long	iter;
+	uint32_t	stat;
+	uint16_t	hccr;
+	uint16_t	mb[4];
+	struct rsp_que *rsp;
+	struct qla_hw_data *ha;
+	unsigned long	flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x5058,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return (IRQ_NONE);
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->isp;
+	status = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	for (iter = 50; iter--; ) {
+		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
+			break;
+		if (stat & HSR_RISC_PAUSED) {
+			if (unlikely(pci_channel_offline(ha->pdev)))
+				break;
+
+			hccr = RD_REG_WORD(&reg->hccr);
+
+			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
+				ql_log(ql_log_warn, vha, 0x5026,
+				    "Parity error -- HCCR=%x, Dumping "
+				    "firmware.\n", hccr);
+			else
+				ql_log(ql_log_warn, vha, 0x5027,
+				    "RISC paused -- HCCR=%x, Dumping "
+				    "firmware.\n", hccr);
+
+			/*
+			 * Issue a "HARD" reset in order for the RISC
+			 * interrupt bit to be cleared.  Schedule a big
+			 * hammer to get out of the RISC PAUSED state.
+			 */
+			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+			RD_REG_WORD(&reg->hccr);
+
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			break;
+		} else if ((stat & HSR_RISC_INT) == 0)
+			break;
+
+		switch (stat & 0xff) {
+		case 0x1:
+		case 0x2:
+		case 0x10:
+		case 0x11:
+			qla2x00_mbx_completion(vha, MSW(stat));
+			status |= MBX_INTERRUPT;
+
+			/* Release mailbox registers. */
+			WRT_REG_WORD(&reg->semaphore, 0);
+			break;
+		case 0x12:
+			mb[0] = MSW(stat);
+			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
+			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		case 0x13:
+			qla2x00_process_response_queue(rsp);
+			break;
+		case 0x15:
+			mb[0] = MBA_CMPLT_1_16BIT;
+			mb[1] = MSW(stat);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		case 0x16:
+			mb[0] = MBA_SCSI_COMPLETION;
+			mb[1] = MSW(stat);
+			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		default:
+			ql_dbg(ql_dbg_async, vha, 0x5028,
+			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
+			break;
+		}
+		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+		RD_REG_WORD_RELAXED(&reg->hccr);
+	}
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return (IRQ_HANDLED);
+}
+
+/**
+ * qla2x00_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+static void
+qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+	uint16_t	cnt;
+	uint32_t	mboxes;
+	uint16_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Read all mbox registers? */
+	WARN_ON_ONCE(ha->mbx_count > 32);
+	mboxes = (1ULL << ha->mbx_count) - 1;
+	if (!ha->mcp)
+		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
+	else
+		mboxes = ha->mcp->in_mb;
+
+	/* Load return mailbox registers. */
+	ha->flags.mbox_int = 1;
+	ha->mailbox_out[0] = mb0;
+	mboxes >>= 1;
+	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
+
+	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
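+		/* Mailbox register 8 is not adjacent to registers 1-7 on
+		 * ISP2200; recompute the pointer at that boundary. */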
+		if (IS_QLA2200(ha) && cnt == 8)
+			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
+		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
+			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
+		else if (mboxes & BIT_0)
+			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+
+		wptr++;
+		mboxes >>= 1;
+	}
+}
+
+static void
+qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
+{
+	static char *event[] =
+		{ "Complete", "Request Notification", "Time Extension" };
+	int rval;
+	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
+	uint16_t __iomem *wptr;
+	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
+
+	/* Seed data -- mailbox1 -> mailbox7. */
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+		wptr = (uint16_t __iomem *)&reg24->mailbox1;
+	else if (IS_QLA8044(vha->hw))
+		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+	else
+		return;
+
+	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
+		mb[cnt] = RD_REG_WORD(wptr);
+
+	ql_dbg(ql_dbg_async, vha, 0x5021,
+	    "Inter-Driver Communication %s -- "
+	    "%04x %04x %04x %04x %04x %04x %04x.\n",
+	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
+	    mb[4], mb[5], mb[6]);
+	switch (aen) {
+	/* Handle IDC Error completion case. */
+	case MBA_IDC_COMPLETE:
+		if (mb[1] >> 15) {
+			vha->hw->flags.idc_compl_status = 1;
+			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
+				complete(&vha->hw->dcbx_comp);
+		}
+		break;
+
+	case MBA_IDC_NOTIFY:
+		/* Acknowledgement needed? [Notify && non-zero timeout]. */
+		timeout = (descr >> 8) & 0xf;
+		ql_dbg(ql_dbg_async, vha, 0x5022,
+		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
+		    vha->host_no, event[aen & 0xff], timeout);
+
+		if (!timeout)
+			return;
+		rval = qla2x00_post_idc_ack_work(vha, mb);
+		if (rval != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0x5023,
+			    "IDC failed to post ACK.\n");
+		break;
+	case MBA_IDC_TIME_EXT:
+		vha->hw->idc_extend_tmo = descr;
+		ql_dbg(ql_dbg_async, vha, 0x5087,
+		    "%lu Inter-Driver Communication %s -- "
+		    "Extend timeout by=%d.\n",
+		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
+		break;
+	}
+}
+
+#define LS_UNKNOWN	2
+const char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
+{
+	static const char *const link_speeds[] = {
+		"1", "2", "?", "4", "8", "16", "32", "10"
+	};
+#define	QLA_LAST_SPEED	7
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return link_speeds[0];
+	else if (speed == 0x13)
+		return link_speeds[QLA_LAST_SPEED];
+	else if (speed < QLA_LAST_SPEED)
+		return link_speeds[speed];
+	else
+		return link_speeds[LS_UNKNOWN];
+}
+
+static void
+qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/*
+	 * 8200 AEN Interpretation:
+	 * mb[0] = AEN code
+	 * mb[1] = AEN Reason code
+	 * mb[2] = LSW of Peg-Halt Status-1 Register
+	 * mb[6] = MSW of Peg-Halt Status-1 Register
+	 * mb[3] = LSW of Peg-Halt Status-2 register
+	 * mb[7] = MSW of Peg-Halt Status-2 register
+	 * mb[4] = IDC Device-State Register value
+	 * mb[5] = IDC Driver-Presence Register value
+	 */
+	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
+	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
+	    mb[0], mb[1], mb[2], mb[6]);
+	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
+	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
+	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
+
+	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
+				IDC_HEARTBEAT_FAILURE)) {
+		ha->flags.nic_core_hung = 1;
+		ql_log(ql_log_warn, vha, 0x5060,
+		    "83XX: F/W Error Reported: Check if reset required.\n");
+
+		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
+			uint32_t protocol_engine_id, fw_err_code, err_level;
+
+			/*
+			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
+			 *  - PEG-Halt Status-1 Register:
+			 *	(LSW = mb[2], MSW = mb[6])
+			 *	Bits 0-7   = protocol-engine ID
+			 *	Bits 8-28  = f/w error code
+			 *	Bits 29-31 = Error-level
+			 *	    Error-level 0x1 = Non-Fatal error
+			 *	    Error-level 0x2 = Recoverable Fatal error
+			 *	    Error-level 0x4 = UnRecoverable Fatal error
+			 *  - PEG-Halt Status-2 Register:
+			 *	(LSW = mb[3], MSW = mb[7])
+			 */
+			protocol_engine_id = (mb[2] & 0xff);
+			fw_err_code = (((mb[2] & 0xff00) >> 8) |
+			    ((mb[6] & 0x1fff) << 8));
+			err_level = ((mb[6] & 0xe000) >> 13);
+			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
+			    "Register: protocol_engine_id=0x%x "
+			    "fw_err_code=0x%x err_level=0x%x.\n",
+			    protocol_engine_id, fw_err_code, err_level);
+			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
+			    "Register: 0x%x%x.\n", mb[7], mb[3]);
+			if (err_level == ERR_LEVEL_NON_FATAL) {
+				ql_log(ql_log_warn, vha, 0x5063,
+				    "Not a fatal error, f/w has recovered itself.\n");
+			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
+				ql_log(ql_log_fatal, vha, 0x5064,
+				    "Recoverable Fatal error: Chip reset "
+				    "required.\n");
+				qla83xx_schedule_work(vha,
+				    QLA83XX_NIC_CORE_RESET);
+			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
+				ql_log(ql_log_fatal, vha, 0x5065,
+				    "Unrecoverable Fatal error: Set FAILED "
+				    "state, reboot required.\n");
+				qla83xx_schedule_work(vha,
+				    QLA83XX_NIC_CORE_UNRECOVERABLE);
+			}
+		}
+
+		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
+			uint16_t peg_fw_state, nw_interface_link_up;
+			uint16_t nw_interface_signal_detect, sfp_status;
+			uint16_t htbt_counter, htbt_monitor_enable;
+			uint16_t sfp_additional_info, sfp_multirate;
+			uint16_t sfp_tx_fault, link_speed, dcbx_status;
+
+			/*
+			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
+			 *  - PEG-to-FC Status Register:
+			 *	(LSW = mb[2], MSW = mb[6])
+			 *	Bits 0-7   = Peg-Firmware state
+			 *	Bit 8      = N/W Interface Link-up
+			 *	Bit 9      = N/W Interface signal detected
+			 *	Bits 10-11 = SFP Status
+			 *	  SFP Status 0x0 = SFP+ transceiver not expected
+			 *	  SFP Status 0x1 = SFP+ transceiver not present
+			 *	  SFP Status 0x2 = SFP+ transceiver invalid
+			 *	  SFP Status 0x3 = SFP+ transceiver present and
+			 *	  valid
+			 *	Bits 12-14 = Heartbeat Counter
+			 *	Bit 15     = Heartbeat Monitor Enable
+			 *	Bits 16-17 = SFP Additional Info
+			 *	  SFP info 0x0 = Unrecognized transceiver for
+			 *	  Ethernet
+			 *	  SFP info 0x1 = SFP+ brand validation failed
+			 *	  SFP info 0x2 = SFP+ speed validation failed
+			 *	  SFP info 0x3 = SFP+ access error
+			 *	Bit 18     = SFP Multirate
+			 *	Bit 19     = SFP Tx Fault
+			 *	Bits 20-22 = Link Speed
+			 *	Bits 23-27 = Reserved
+			 *	Bits 28-30 = DCBX Status
+			 *	  DCBX Status 0x0 = DCBX Disabled
+			 *	  DCBX Status 0x1 = DCBX Enabled
+			 *	  DCBX Status 0x2 = DCBX Exchange error
+			 *	Bit 31     = Reserved
+			 */
+			peg_fw_state = (mb[2] & 0x00ff);
+			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
+			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
+			sfp_status = ((mb[2] & 0x0c00) >> 10);
+			htbt_counter = ((mb[2] & 0x7000) >> 12);
+			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
+			sfp_additional_info = (mb[6] & 0x0003);
+			sfp_multirate = ((mb[6] & 0x0004) >> 2);
+			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
+			link_speed = ((mb[6] & 0x0070) >> 4);
+			dcbx_status = ((mb[6] & 0x7000) >> 12);
+
+			ql_log(ql_log_warn, vha, 0x5066,
+			    "Peg-to-FC Status Register:\n"
+			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
+			    "nw_interface_signal_detect=0x%x"
+			    "\nsfp_status=0x%x.\n", peg_fw_state,
+			    nw_interface_link_up, nw_interface_signal_detect,
+			    sfp_status);
+			ql_log(ql_log_warn, vha, 0x5067,
+			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
+			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
+			    htbt_counter, htbt_monitor_enable,
+			    sfp_additional_info, sfp_multirate);
+			ql_log(ql_log_warn, vha, 0x5068,
+			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
+			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
+			    dcbx_status);
+
+			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+		}
+
+		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
+			ql_log(ql_log_warn, vha, 0x5069,
+			    "Heartbeat Failure encountered, chip reset "
+			    "required.\n");
+
+			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
+		}
+	}
+
+	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
+		ql_log(ql_log_info, vha, 0x506a,
+		    "IDC Device-State changed = 0x%x.\n", mb[4]);
+		if (ha->flags.nic_core_reset_owner)
+			return;
+		qla83xx_schedule_work(vha, MBA_IDC_AEN);
+	}
+}
+
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *vp;
+	uint32_t vp_did;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!ha->num_vhosts)
+		return ret;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		vp_did = vp->d_id.b24;
+		if (vp_did == rscn_entry) {
+			ret = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	return ret;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+	fc_port_t *f, *tf;
+
+	f = tf = NULL;
+	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
+		if (f->loop_id == loop_id)
+			return f;
+	return NULL;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
+{
+	fc_port_t *f, *tf;
+
+	f = tf = NULL;
+	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
+			if (incl_deleted)
+				return f;
+			else if (f->deleted == 0)
+				return f;
+		}
+	}
+	return NULL;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
+	u8 incl_deleted)
+{
+	fc_port_t *f, *tf;
+
+	f = tf = NULL;
+	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+		if (f->d_id.b24 == id->b24) {
+			if (incl_deleted)
+				return f;
+			else if (f->deleted == 0)
+				return f;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * qla2x00_async_event() - Process asynchronous events.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @mb: Mailbox registers (0 - 3)
+ */
+void
+qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+{
+	uint16_t	handle_cnt;
+	uint16_t	cnt, mbx;
+	uint32_t	handles[5];
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+	uint32_t	rscn_entry, host_pid;
+	unsigned long	flags;
+	fc_port_t	*fcport = NULL;
+
+	/* Setup to process RIO completion. */
+	handle_cnt = 0;
+	if (IS_CNA_CAPABLE(ha))
+		goto skip_rio;
+	switch (mb[0]) {
+	case MBA_SCSI_COMPLETION:
+		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+		handle_cnt = 1;
+		break;
+	case MBA_CMPLT_1_16BIT:
+		handles[0] = mb[1];
+		handle_cnt = 1;
+		mb[0] = MBA_SCSI_COMPLETION;
+		break;
+	case MBA_CMPLT_2_16BIT:
+		handles[0] = mb[1];
+		handles[1] = mb[2];
+		handle_cnt = 2;
+		mb[0] = MBA_SCSI_COMPLETION;
+		break;
+	case MBA_CMPLT_3_16BIT:
+		handles[0] = mb[1];
+		handles[1] = mb[2];
+		handles[2] = mb[3];
+		handle_cnt = 3;
+		mb[0] = MBA_SCSI_COMPLETION;
+		break;
+	case MBA_CMPLT_4_16BIT:
+		handles[0] = mb[1];
+		handles[1] = mb[2];
+		handles[2] = mb[3];
+		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
+		handle_cnt = 4;
+		mb[0] = MBA_SCSI_COMPLETION;
+		break;
+	case MBA_CMPLT_5_16BIT:
+		handles[0] = mb[1];
+		handles[1] = mb[2];
+		handles[2] = mb[3];
+		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
+		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
+		handle_cnt = 5;
+		mb[0] = MBA_SCSI_COMPLETION;
+		break;
+	case MBA_CMPLT_2_32BIT:
+		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+		handles[1] = le32_to_cpu(
+		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
+		    RD_MAILBOX_REG(ha, reg, 6));
+		handle_cnt = 2;
+		mb[0] = MBA_SCSI_COMPLETION;
+		break;
+	default:
+		break;
+	}
+skip_rio:
+	switch (mb[0]) {
+	case MBA_SCSI_COMPLETION:	/* Fast Post */
+		if (!vha->flags.online)
+			break;
+
+		for (cnt = 0; cnt < handle_cnt; cnt++)
+			qla2x00_process_completed_request(vha, rsp->req,
+				handles[cnt]);
+		break;
+
+	case MBA_RESET:			/* Reset */
+		ql_dbg(ql_dbg_async, vha, 0x5002,
+		    "Asynchronous RESET.\n");
+
+		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_SYSTEM_ERR:		/* System Error */
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
+			RD_REG_WORD(&reg24->mailbox7) : 0;
+		ql_log(ql_log_warn, vha, 0x5003,
+		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
+		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
+
+		ha->isp_ops->fw_dump(vha, 1);
+		ha->flags.fw_init_done = 0;
+		QLA_FW_STOPPED(ha);
+
+		if (IS_FWI2_CAPABLE(ha)) {
+			if (mb[1] == 0 && mb[2] == 0) {
+				ql_log(ql_log_fatal, vha, 0x5004,
+				    "Unrecoverable Hardware Error: adapter "
+				    "marked OFFLINE!\n");
+				vha->flags.online = 0;
+				vha->device_flags |= DFLG_DEV_FAILED;
+			} else {
+				/* Check to see if MPI timeout occurred */
+				if ((mbx & MBX_3) && (ha->port_no == 0))
+					set_bit(MPI_RESET_NEEDED,
+					    &vha->dpc_flags);
+
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			}
+		} else if (mb[1] == 0) {
+			ql_log(ql_log_fatal, vha, 0x5005,
+			    "Unrecoverable Hardware Error: adapter marked "
+			    "OFFLINE!\n");
+			vha->flags.online = 0;
+			vha->device_flags |= DFLG_DEV_FAILED;
+		} else
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
+		ql_log(ql_log_warn, vha, 0x5006,
+		    "ISP Request Transfer Error (%x).\n",  mb[1]);
+
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
+		ql_log(ql_log_warn, vha, 0x5007,
+		    "ISP Response Transfer Error (%x).\n", mb[1]);
+
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
+		ql_dbg(ql_dbg_async, vha, 0x5008,
+		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
+		break;
+
+	case MBA_LOOP_INIT_ERR:
+		ql_log(ql_log_warn, vha, 0x5090,
+		    "LOOP INIT ERROR (%x).\n", mb[1]);
+		ha->isp_ops->fw_dump(vha, 1);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
+		ha->flags.lip_ae = 1;
+		ha->flags.n2n_ae = 0;
+
+		ql_dbg(ql_dbg_async, vha, 0x5009,
+		    "LIP occurred (%x).\n", mb[1]);
+
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			qla2x00_mark_all_devices_lost(vha, 1);
+		}
+
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		}
+
+		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
+		vha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
+		break;
+
+	case MBA_LOOP_UP:		/* Loop Up Event */
+		if (IS_QLA2100(ha) || IS_QLA2200(ha))
+			ha->link_data_rate = PORT_SPEED_1GB;
+		else
+			ha->link_data_rate = mb[1];
+
+		ql_log(ql_log_info, vha, 0x500a,
+		    "LOOP UP detected (%s Gbps).\n",
+		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
+
+		vha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
+
+		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
+			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		break;
+
+	case MBA_LOOP_DOWN:		/* Loop Down Event */
+		ha->flags.n2n_ae = 0;
+		ha->flags.lip_ae = 0;
+		ha->current_topology = 0;
+
+		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
+			? RD_REG_WORD(&reg24->mailbox4) : 0;
+		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+			: mbx;
+		ql_log(ql_log_info, vha, 0x500b,
+		    "LOOP DOWN detected (%x %x %x %x).\n",
+		    mb[1], mb[2], mb[3], mbx);
+
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			/*
+			 * In case of loop down, restore the WWPN from
+			 * NVRAM on FA-WWPN capable ISPs.
+			 * Restore for the physical port only.
+			 */
+			if (!vha->vp_idx) {
+				if (ha->flags.fawwpn_enabled) {
+					void *wwpn = ha->init_cb->port_name;
+					memcpy(vha->port_name, wwpn, WWN_SIZE);
+					fc_host_port_name(vha->host) =
+					    wwn_to_u64(vha->port_name);
+					ql_dbg(ql_dbg_init + ql_dbg_verbose,
+					    vha, 0x00d8, "LOOP DOWN detected, "
+					    "restore WWPN %016llx\n",
+					    wwn_to_u64(vha->port_name));
+				}
+
+				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
+			}
+
+			vha->device_flags |= DFLG_NO_CABLE;
+			qla2x00_mark_all_devices_lost(vha, 1);
+		}
+
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		}
+
+		vha->flags.management_server_logged_in = 0;
+		ha->link_data_rate = PORT_SPEED_UNKNOWN;
+		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
+		break;
+
+	case MBA_LIP_RESET:		/* LIP reset occurred */
+		ql_dbg(ql_dbg_async, vha, 0x500c,
+		    "LIP reset occurred (%x).\n", mb[1]);
+
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			qla2x00_mark_all_devices_lost(vha, 1);
+		}
+
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		}
+
+		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+		ha->operating_mode = LOOP;
+		vha->flags.management_server_logged_in = 0;
+		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
+		break;
+
+	/* case MBA_DCBX_COMPLETE: */
+	case MBA_POINT_TO_POINT:	/* Point-to-Point */
+		ha->flags.lip_ae = 0;
+		ha->flags.n2n_ae = 1;
+
+		if (IS_QLA2100(ha))
+			break;
+
+		if (IS_CNA_CAPABLE(ha)) {
+			ql_dbg(ql_dbg_async, vha, 0x500d,
+			    "DCBX Completed -- %04x %04x %04x.\n",
+			    mb[1], mb[2], mb[3]);
+			if (ha->notify_dcbx_comp && !vha->vp_idx)
+				complete(&ha->dcbx_comp);
+
+		} else
+			ql_dbg(ql_dbg_async, vha, 0x500e,
+			    "Asynchronous P2P MODE received.\n");
+
+		/*
+		 * Until there's a transition from loop down to loop up, treat
+		 * this as loop down only.
+		 */
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			if (!atomic_read(&vha->loop_down_timer))
+				atomic_set(&vha->loop_down_timer,
+				    LOOP_DOWN_TIME);
+			qla2x00_mark_all_devices_lost(vha, 1);
+		}
+
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		}
+
+		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
+			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
+		ha->flags.gpsc_supported = 1;
+		vha->flags.management_server_logged_in = 0;
+		break;
+
+	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
+		if (IS_QLA2100(ha))
+			break;
+
+		ql_dbg(ql_dbg_async, vha, 0x500f,
+		    "Configuration change detected: value=%x.\n", mb[1]);
+
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+			atomic_set(&vha->loop_state, LOOP_DOWN);
+			if (!atomic_read(&vha->loop_down_timer))
+				atomic_set(&vha->loop_down_timer,
+				    LOOP_DOWN_TIME);
+			qla2x00_mark_all_devices_lost(vha, 1);
+		}
+
+		if (vha->vp_idx) {
+			atomic_set(&vha->vp_state, VP_FAILED);
+			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		}
+
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		break;
+
+	case MBA_PORT_UPDATE:		/* Port database update */
+		/*
+		 * Handle only global and vn-port update events
+		 *
+		 * Relevant inputs:
+		 * mb[1] = N_Port handle of changed port
+		 * OR 0xffff for global event
+		 * mb[2] = New login state
+		 * 7 = Port logged out
+		 * mb[3] = LSB is vp_idx, 0xff = all vps
+		 *
+		 * Skip processing if:
+		 *       Event is global, vp_idx is NOT all vps,
+		 *           vp_idx does not match
+		 *       Event is not global, vp_idx does not match
+		 */
+		if (IS_QLA2XXX_MIDTYPE(ha) &&
+		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
+			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
+			break;
+
+		if (mb[2] == 0x7) {
+			ql_dbg(ql_dbg_async, vha, 0x5010,
+			    "Port %s %04x %04x %04x.\n",
+			    mb[1] == 0xffff ? "unavailable" : "logout",
+			    mb[1], mb[2], mb[3]);
+
+			if (mb[1] == 0xffff)
+				goto global_port_update;
+
+			if (mb[1] == NPH_SNS_LID(ha)) {
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+				break;
+			}
+
+			/* use handle_cnt for loop id/nport handle */
+			if (IS_FWI2_CAPABLE(ha))
+				handle_cnt = NPH_SNS;
+			else
+				handle_cnt = SIMPLE_NAME_SERVER;
+			if (mb[1] == handle_cnt) {
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+				break;
+			}
+
+			/* Port logout */
+			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
+			if (!fcport)
+				break;
+			if (atomic_read(&fcport->state) != FCS_ONLINE)
+				break;
+			ql_dbg(ql_dbg_async, vha, 0x508a,
+			    "Marking port lost loopid=%04x portid=%06x.\n",
+			    fcport->loop_id, fcport->d_id.b24);
+			if (qla_ini_mode_enabled(vha)) {
+				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+				fcport->logout_on_delete = 0;
+				qlt_schedule_sess_for_deletion_lock(fcport);
+			}
+			break;
+
+global_port_update:
+			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+				atomic_set(&vha->loop_state, LOOP_DOWN);
+				atomic_set(&vha->loop_down_timer,
+				    LOOP_DOWN_TIME);
+				vha->device_flags |= DFLG_NO_CABLE;
+				qla2x00_mark_all_devices_lost(vha, 1);
+			}
+
+			if (vha->vp_idx) {
+				atomic_set(&vha->vp_state, VP_FAILED);
+				fc_vport_set_state(vha->fc_vport,
+				    FC_VPORT_FAILED);
+				qla2x00_mark_all_devices_lost(vha, 1);
+			}
+
+			vha->flags.management_server_logged_in = 0;
+			ha->link_data_rate = PORT_SPEED_UNKNOWN;
+			break;
+		}
+
+		/*
+		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
+		 * event etc. earlier indicating loop is down) then process
+		 * it.  Otherwise ignore it and wait for an RSCN to come in.
+		 */
+		atomic_set(&vha->loop_down_timer, 0);
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
+			ql_dbg(ql_dbg_async, vha, 0x5011,
+			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
+			    mb[1], mb[2], mb[3]);
+			break;
+		}
+
+		ql_dbg(ql_dbg_async, vha, 0x5012,
+		    "Port database changed %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+
+		/*
+		 * Mark all devices as missing so we will login again.
+		 */
+		atomic_set(&vha->loop_state, LOOP_UP);
+
+		qla2x00_mark_all_devices_lost(vha, 1);
+
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+		set_bit(VP_CONFIG_OK, &vha->vp_flags);
+		break;
+
+	case MBA_RSCN_UPDATE:		/* State Change Registration */
+		/* Check if the Vport has issued a SCR */
+		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
+			break;
+		/* Only handle SCNs for our Vport index. */
+		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
+			break;
+
+		ql_dbg(ql_dbg_async, vha, 0x5013,
+		    "RSCN database changed -- %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+
+		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
+		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
+				| vha->d_id.b.al_pa;
+		if (rscn_entry == host_pid) {
+			ql_dbg(ql_dbg_async, vha, 0x5014,
+			    "Ignoring RSCN update to local host "
+			    "port ID (%06x).\n", host_pid);
+			break;
+		}
+
+		/* Ignore reserved bits from RSCN-payload. */
+		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
+
+		/* Skip RSCNs for virtual ports on the same physical port */
+		if (qla2x00_is_a_vp_did(vha, rscn_entry))
+			break;
+
+		atomic_set(&vha->loop_down_timer, 0);
+		vha->flags.management_server_logged_in = 0;
+		{
+			struct event_arg ea;
+
+			memset(&ea, 0, sizeof(ea));
+			ea.event = FCME_RSCN;
+			ea.id.b24 = rscn_entry;
+			ea.id.b.rsvd_1 = rscn_entry >> 24;
+			qla2x00_fcport_event_handler(vha, &ea);
+			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
+		}
+		break;
+	/* case MBA_RIO_RESPONSE: */
+	case MBA_ZIO_RESPONSE:
+		ql_dbg(ql_dbg_async, vha, 0x5015,
+		    "[R|Z]IO update completion.\n");
+
+		if (IS_FWI2_CAPABLE(ha))
+			qla24xx_process_response_queue(vha, rsp);
+		else
+			qla2x00_process_response_queue(rsp);
+		break;
+
+	case MBA_DISCARD_RND_FRAME:
+		ql_dbg(ql_dbg_async, vha, 0x5016,
+		    "Discard RND Frame -- %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+		break;
+
+	case MBA_TRACE_NOTIFICATION:
+		ql_dbg(ql_dbg_async, vha, 0x5017,
+		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
+		break;
+
+	case MBA_ISP84XX_ALERT:
+		ql_dbg(ql_dbg_async, vha, 0x5018,
+		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+
+		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
+		switch (mb[1]) {
+		case A84_PANIC_RECOVERY:
+			ql_log(ql_log_info, vha, 0x5019,
+			    "Alert 84XX: panic recovery %04x %04x.\n",
+			    mb[2], mb[3]);
+			break;
+		case A84_OP_LOGIN_COMPLETE:
+			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
+			ql_log(ql_log_info, vha, 0x501a,
+			    "Alert 84XX: firmware version %x.\n",
+			    ha->cs84xx->op_fw_version);
+			break;
+		case A84_DIAG_LOGIN_COMPLETE:
+			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
+			ql_log(ql_log_info, vha, 0x501b,
+			    "Alert 84XX: diagnostic firmware version %x.\n",
+			    ha->cs84xx->diag_fw_version);
+			break;
+		case A84_GOLD_LOGIN_COMPLETE:
+			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
+			ha->cs84xx->fw_update = 1;
+			ql_log(ql_log_info, vha, 0x501c,
+			    "Alert 84XX: gold firmware version %x.\n",
+			    ha->cs84xx->gold_fw_version);
+			break;
+		default:
+			ql_log(ql_log_warn, vha, 0x501d,
+			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
+			    mb[1], mb[2], mb[3]);
+		}
+		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
+		break;
+	case MBA_DCBX_START:
+		ql_dbg(ql_dbg_async, vha, 0x501e,
+		    "DCBX Started -- %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+		break;
+	case MBA_DCBX_PARAM_UPDATE:
+		ql_dbg(ql_dbg_async, vha, 0x501f,
+		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+		break;
+	case MBA_FCF_CONF_ERR:
+		ql_dbg(ql_dbg_async, vha, 0x5020,
+		    "FCF Configuration Error -- %04x %04x %04x.\n",
+		    mb[1], mb[2], mb[3]);
+		break;
+	case MBA_IDC_NOTIFY:
+		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
+			mb[4] = RD_REG_WORD(&reg24->mailbox4);
+			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
+				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+				/*
+				 * Extend loop down timer since port is active.
+				 */
+				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+					atomic_set(&vha->loop_down_timer,
+					    LOOP_DOWN_TIME);
+				qla2xxx_wake_dpc(vha);
+			}
+		}
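+		/* Fall through */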
+	case MBA_IDC_COMPLETE:
+		if (ha->notify_lb_portup_comp && !vha->vp_idx)
+			complete(&ha->lb_portup_comp);
+		/* Fallthru */
+	case MBA_IDC_TIME_EXT:
+		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
+		    IS_QLA8044(ha))
+			qla81xx_idc_event(vha, mb[0], mb[1]);
+		break;
+
+	case MBA_IDC_AEN:
+		mb[4] = RD_REG_WORD(&reg24->mailbox4);
+		mb[5] = RD_REG_WORD(&reg24->mailbox5);
+		mb[6] = RD_REG_WORD(&reg24->mailbox6);
+		mb[7] = RD_REG_WORD(&reg24->mailbox7);
+		qla83xx_handle_8200_aen(vha, mb);
+		break;
+
+	case MBA_DPORT_DIAGNOSTICS:
+		ql_dbg(ql_dbg_async, vha, 0x5052,
+		    "D-Port Diagnostics: %04x result=%s\n",
+		    mb[0],
+		    mb[1] == 0 ? "start" :
+		    mb[1] == 1 ? "done (pass)" :
+		    mb[1] == 2 ? "done (error)" : "other");
+		break;
+
+	case MBA_TEMPERATURE_ALERT:
+		ql_dbg(ql_dbg_async, vha, 0x505e,
+		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
+		if (mb[1] == 0x12)
+			schedule_work(&ha->board_disable);
+		break;
+
+	case MBA_TRANS_INSERT:
+		ql_dbg(ql_dbg_async, vha, 0x5091,
+		    "Transceiver Insertion: %04x\n", mb[1]);
+		break;
+
+	default:
+		ql_dbg(ql_dbg_async, vha, 0x5057,
+		    "Unknown AEN:%04x %04x %04x %04x\n",
+		    mb[0], mb[1], mb[2], mb[3]);
+	}
+
+	qlt_async_event(mb[0], vha, mb);
+
+	if (!vha->vp_idx && ha->num_vhosts)
+		qla2x00_alert_all_vps(rsp, mb);
+}
+
+/**
+ * qla2x00_process_completed_request() - Process a Fast Post response.
+ * @vha: SCSI driver HA context
+ * @req: request queue
+ * @index: SRB index
+ */
+void
+qla2x00_process_completed_request(struct scsi_qla_host *vha,
+				  struct req_que *req, uint32_t index)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Validate handle. */
+	if (index >= req->num_outstanding_cmds) {
+		ql_log(ql_log_warn, vha, 0x3014,
+		    "Invalid SCSI command index (%x).\n", index);
+
+		if (IS_P3P_TYPE(ha))
+			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+		else
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		return;
+	}
+
+	sp = req->outstanding_cmds[index];
+	if (sp) {
+		/* Free outstanding command slot. */
+		req->outstanding_cmds[index] = NULL;
+
+		/* Save ISP completion status */
+		sp->done(sp, DID_OK << 16);
+	} else {
+		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
+
+		if (IS_P3P_TYPE(ha))
+			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+		else
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	}
+}
+
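+/*
+ * Look up the SRB for an IOCB completion handle: validate the handle,
+ * release the outstanding-command slot on a match, and schedule an ISP
+ * abort if the handle is out of range.
+ */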
+srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+    struct req_que *req, void *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	sts_entry_t *pkt = iocb;
+	srb_t *sp = NULL;
+	uint16_t index;
+
+	index = LSW(pkt->handle);
+	if (index >= req->num_outstanding_cmds) {
+		ql_log(ql_log_warn, vha, 0x5031,
+			   "Invalid command index (%x) type %8ph.\n",
+			   index, iocb);
+		if (IS_P3P_TYPE(ha))
+			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+		else
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		goto done;
+	}
+	sp = req->outstanding_cmds[index];
+	if (!sp) {
+		ql_log(ql_log_warn, vha, 0x5032,
+		    "Invalid completion handle (%x) -- timed-out.\n", index);
+		return sp;
+	}
+	if (sp->handle != index) {
+		ql_log(ql_log_warn, vha, 0x5033,
+		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
+		return NULL;
+	}
+
+	req->outstanding_cmds[index] = NULL;
+
+done:
+	return sp;
+}
+
+static void
+qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct mbx_entry *mbx)
+{
+	const char func[] = "MBX-IOCB";
+	const char *type;
+	fc_port_t *fcport;
+	srb_t *sp;
+	struct srb_iocb *lio;
+	uint16_t *data;
+	uint16_t status;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
+	if (!sp)
+		return;
+
+	lio = &sp->u.iocb_cmd;
+	type = sp->name;
+	fcport = sp->fcport;
+	data = lio->u.logio.data;
+
+	data[0] = MBS_COMMAND_ERROR;
+	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+	    QLA_LOGIO_LOGIN_RETRIED : 0;
+	if (mbx->entry_status) {
+		ql_dbg(ql_dbg_async, vha, 0x5043,
+		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
+		    "entry-status=%x status=%x state-flag=%x "
+		    "status-flags=%x.\n", type, sp->handle,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa, mbx->entry_status,
+		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
+		    le16_to_cpu(mbx->status_flags));
+
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
+		    (uint8_t *)mbx, sizeof(*mbx));
+
+		goto logio_done;
+	}
+
+	status = le16_to_cpu(mbx->status);
+	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
+	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
+		status = 0;
+	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
+		ql_dbg(ql_dbg_async, vha, 0x5045,
+		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+		    type, sp->handle, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    le16_to_cpu(mbx->mb1));
+
+		data[0] = MBS_COMMAND_COMPLETE;
+		if (sp->type == SRB_LOGIN_CMD) {
+			fcport->port_type = FCT_TARGET;
+			if (le16_to_cpu(mbx->mb1) & BIT_0)
+				fcport->port_type = FCT_INITIATOR;
+			else if (le16_to_cpu(mbx->mb1) & BIT_1)
+				fcport->flags |= FCF_FCP2_DEVICE;
+		}
+		goto logio_done;
+	}
+
+	data[0] = le16_to_cpu(mbx->mb0);
+	switch (data[0]) {
+	case MBS_PORT_ID_USED:
+		data[1] = le16_to_cpu(mbx->mb1);
+		break;
+	case MBS_LOOP_ID_USED:
+		break;
+	default:
+		data[0] = MBS_COMMAND_ERROR;
+		break;
+	}
+
+	ql_log(ql_log_warn, vha, 0x5046,
+	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
+	    le16_to_cpu(mbx->mb7));
+
+logio_done:
+	sp->done(sp, 0);
+}
+
+static void
+qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct mbx_24xx_entry *pkt)
+{
+	const char func[] = "MBX-IOCB2";
+	srb_t *sp;
+	struct srb_iocb *si;
+	u16 sz, i;
+	int res;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	si = &sp->u.iocb_cmd;
+	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
+
+	for (i = 0; i < sz; i++)
+		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
+
+	res = (si->u.mbx.in_mb[0] & MBS_MASK);
+
+	sp->done(sp, res);
+}
+
+static void
+qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct nack_to_isp *pkt)
+{
+	const char func[] = "nack";
+	srb_t *sp;
+	int res = 0;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
+		res = QLA_FUNCTION_FAILED;
+
+	sp->done(sp, res);
+}
+
+static void
+qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+    sts_entry_t *pkt, int iocb_type)
+{
+	const char func[] = "CT_IOCB";
+	const char *type;
+	srb_t *sp;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_reply *bsg_reply;
+	uint16_t comp_status;
+	int res = 0;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	switch (sp->type) {
+	case SRB_CT_CMD:
+	    bsg_job = sp->u.bsg_job;
+	    bsg_reply = bsg_job->reply;
+
+	    type = "ct pass-through";
+
+	    comp_status = le16_to_cpu(pkt->comp_status);
+
+	    /*
+	     * Return FC_CTELS_STATUS_OK and leave the decoding of the
+	     * ELS/CT FC payload to the caller.
+	     */
+	    bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+	    if (comp_status != CS_COMPLETE) {
+		    if (comp_status == CS_DATA_UNDERRUN) {
+			    res = DID_OK << 16;
+			    bsg_reply->reply_payload_rcv_len =
+				le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+
+			    ql_log(ql_log_warn, vha, 0x5048,
+				"CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
+				type, comp_status,
+				bsg_reply->reply_payload_rcv_len);
+		    } else {
+			    ql_log(ql_log_warn, vha, 0x5049,
+				"CT pass-through-%s error comp_status=0x%x.\n",
+				type, comp_status);
+			    res = DID_ERROR << 16;
+			    bsg_reply->reply_payload_rcv_len = 0;
+		    }
+		    ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
+			(uint8_t *)pkt, sizeof(*pkt));
+	    } else {
+		    res = DID_OK << 16;
+		    bsg_reply->reply_payload_rcv_len =
+			bsg_job->reply_payload.payload_len;
+		    bsg_job->reply_len = 0;
+	    }
+	    break;
+	case SRB_CT_PTHRU_CMD:
+	    /*
+	     * borrowing sts_entry_24xx.comp_status.
+	     * same location as ct_entry_24xx.comp_status
+	     */
+	     res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+		 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+		 sp->name);
+	     break;
+	}
+
+	sp->done(sp, res);
+}
+
+static void
+qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct sts_entry_24xx *pkt, int iocb_type)
+{
+	const char func[] = "ELS_CT_IOCB";
+	const char *type;
+	srb_t *sp;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_reply *bsg_reply;
+	uint16_t comp_status;
+	uint32_t fw_status[3];
+	uint8_t *fw_sts_ptr;
+	int res;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	type = NULL;
+	switch (sp->type) {
+	case SRB_ELS_CMD_RPT:
+	case SRB_ELS_CMD_HST:
+		type = "els";
+		break;
+	case SRB_CT_CMD:
+		type = "ct pass-through";
+		break;
+	case SRB_ELS_DCMD:
+		type = "Driver ELS logo";
+		ql_dbg(ql_dbg_user, vha, 0x5047,
+		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
+		sp->done(sp, 0);
+		return;
+	case SRB_CT_PTHRU_CMD:
+		/*
+		 * Borrowing sts_entry_24xx.comp_status:
+		 * same location as ct_entry_24xx.comp_status.
+		 */
+		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
+			(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+			sp->name);
+		sp->done(sp, res);
+		return;
+	default:
+		ql_dbg(ql_dbg_user, vha, 0x503e,
+		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
+		return;
+	}
+
+	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
+	/*
+	 * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+	 * FC payload to the caller.
+	 */
+	bsg_job = sp->u.bsg_job;
+	bsg_reply = bsg_job->reply;
+	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
+
+	if (comp_status != CS_COMPLETE) {
+		if (comp_status == CS_DATA_UNDERRUN) {
+			res = DID_OK << 16;
+			bsg_reply->reply_payload_rcv_len =
+			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+
+			ql_dbg(ql_dbg_user, vha, 0x503f,
+			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
+			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
+			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
+			    le16_to_cpu(((struct els_sts_entry_24xx *)
+				pkt)->total_byte_count));
+			fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
+				sizeof(struct fc_bsg_reply);
+			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
+		} else {
+			ql_dbg(ql_dbg_user, vha, 0x5040,
+			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
+			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
+			    type, sp->handle, comp_status,
+			    le16_to_cpu(((struct els_sts_entry_24xx *)
+				pkt)->error_subcode_1),
+			    le16_to_cpu(((struct els_sts_entry_24xx *)
+				    pkt)->error_subcode_2));
+			res = DID_ERROR << 16;
+			bsg_reply->reply_payload_rcv_len = 0;
+			fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
+				sizeof(struct fc_bsg_reply);
+			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
+		}
+		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
+				(uint8_t *)pkt, sizeof(*pkt));
+	} else {
+		res = DID_OK << 16;
+		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+		bsg_job->reply_len = 0;
+	}
+
+	sp->done(sp, res);
+}
+
+static void
+qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct logio_entry_24xx *logio)
+{
+	const char func[] = "LOGIO-IOCB";
+	const char *type;
+	fc_port_t *fcport;
+	srb_t *sp;
+	struct srb_iocb *lio;
+	uint16_t *data;
+	uint32_t iop[2];
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
+	if (!sp)
+		return;
+
+	lio = &sp->u.iocb_cmd;
+	type = sp->name;
+	fcport = sp->fcport;
+	data = lio->u.logio.data;
+
+	data[0] = MBS_COMMAND_ERROR;
+	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
+		QLA_LOGIO_LOGIN_RETRIED : 0;
+	if (logio->entry_status) {
+		ql_log(ql_log_warn, fcport->vha, 0x5034,
+		    "Async-%s error entry - %8phC hdl=%x"
+		    "portid=%02x%02x%02x entry-status=%x.\n",
+		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    logio->entry_status);
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
+		    (uint8_t *)logio, sizeof(*logio));
+
+		goto logio_done;
+	}
+
+	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
+		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
+		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
+		    fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+		    le32_to_cpu(logio->io_parameter[0]));
+
+		vha->hw->exch_starvation = 0;
+		data[0] = MBS_COMMAND_COMPLETE;
+		if (sp->type != SRB_LOGIN_CMD)
+			goto logio_done;
+
+		iop[0] = le32_to_cpu(logio->io_parameter[0]);
+		if (iop[0] & BIT_4) {
+			fcport->port_type = FCT_TARGET;
+			if (iop[0] & BIT_8)
+				fcport->flags |= FCF_FCP2_DEVICE;
+		} else if (iop[0] & BIT_5)
+			fcport->port_type = FCT_INITIATOR;
+
+		if (iop[0] & BIT_7)
+			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
+		if (logio->io_parameter[7] || logio->io_parameter[8])
+			fcport->supported_classes |= FC_COS_CLASS2;
+		if (logio->io_parameter[9] || logio->io_parameter[10])
+			fcport->supported_classes |= FC_COS_CLASS3;
+
+		goto logio_done;
+	}
+
+	iop[0] = le32_to_cpu(logio->io_parameter[0]);
+	iop[1] = le32_to_cpu(logio->io_parameter[1]);
+	lio->u.logio.iop[0] = iop[0];
+	lio->u.logio.iop[1] = iop[1];
+	switch (iop[0]) {
+	case LSC_SCODE_PORTID_USED:
+		data[0] = MBS_PORT_ID_USED;
+		data[1] = LSW(iop[1]);
+		break;
+	case LSC_SCODE_NPORT_USED:
+		data[0] = MBS_LOOP_ID_USED;
+		break;
+	case LSC_SCODE_CMD_FAILED:
+		if (iop[1] == 0x0606) {
+			/*
+			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
+			 * Target side acked.
+			 */
+			data[0] = MBS_COMMAND_COMPLETE;
+			goto logio_done;
+		}
+		data[0] = MBS_COMMAND_ERROR;
+		break;
+	case LSC_SCODE_NOXCB:
+		vha->hw->exch_starvation++;
+		if (vha->hw->exch_starvation > 5) {
+			ql_log(ql_log_warn, vha, 0xd046,
+			    "Exchange starvation. Resetting RISC\n");
+
+			vha->hw->exch_starvation = 0;
+
+			if (IS_P3P_TYPE(vha->hw))
+				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+			else
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		/* drop through */
+	default:
+		data[0] = MBS_COMMAND_ERROR;
+		break;
+	}
+
+	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
+	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
+		sp->handle, fcport->d_id.b.domain,
+	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    le16_to_cpu(logio->comp_status),
+	    le32_to_cpu(logio->io_parameter[0]),
+	    le32_to_cpu(logio->io_parameter[1]));
+
+logio_done:
+	sp->done(sp, 0);
+}
+
+static void
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+{
+	const char func[] = "TMF-IOCB";
+	const char *type;
+	fc_port_t *fcport;
+	srb_t *sp;
+	struct srb_iocb *iocb;
+	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+	if (!sp)
+		return;
+
+	iocb = &sp->u.iocb_cmd;
+	type = sp->name;
+	fcport = sp->fcport;
+	iocb->u.tmf.data = QLA_SUCCESS;
+
+	if (sts->entry_status) {
+		ql_log(ql_log_warn, fcport->vha, 0x5038,
+		    "Async-%s error - hdl=%x entry-status(%x).\n",
+		    type, sp->handle, sts->entry_status);
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
+		ql_log(ql_log_warn, fcport->vha, 0x5039,
+		    "Async-%s error - hdl=%x completion status(%x).\n",
+		    type, sp->handle, sts->comp_status);
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+	} else if ((le16_to_cpu(sts->scsi_status) &
+	    SS_RESPONSE_INFO_LEN_VALID)) {
+		if (le32_to_cpu(sts->rsp_data_len) < 4) {
+			ql_log(ql_log_warn, fcport->vha, 0x503b,
+			    "Async-%s error - hdl=%x not enough response(%d).\n",
+			    type, sp->handle, sts->rsp_data_len);
+		} else if (sts->data[3]) {
+			ql_log(ql_log_warn, fcport->vha, 0x503c,
+			    "Async-%s error - hdl=%x response(%x).\n",
+			    type, sp->handle, sts->data[3]);
+			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	if (iocb->u.tmf.data != QLA_SUCCESS)
+		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
+		    (uint8_t *)sts, sizeof(*sts));
+
+	sp->done(sp, 0);
+}
+
+static void
+qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+{
+	const char func[] = "NVME-IOCB";
+	fc_port_t *fcport;
+	srb_t *sp;
+	struct srb_iocb *iocb;
+	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+	uint16_t        state_flags;
+	struct nvmefc_fcp_req *fd;
+	uint16_t        ret = 0;
+	struct srb_iocb *nvme;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+	if (!sp)
+		return;
+
+	iocb = &sp->u.iocb_cmd;
+	fcport = sp->fcport;
+	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+	state_flags  = le16_to_cpu(sts->state_flags);
+	fd = iocb->u.nvme.desc;
+	nvme = &sp->u.iocb_cmd;
+
+	if (unlikely(nvme->u.nvme.aen_op))
+		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
+
+	/*
+	 * State flags: bits 6 and 0.
+	 * If bit 0 is set, bit 6 does not matter: in both cases the
+	 * response was already DMA'd to the host buffer.
+	 * If both are clear, that is the good-path case.
+	 * If bit 6 is set and bit 0 is clear, copy the response data
+	 * from the status IOCB to the response buffer.
+	 */
+	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
+		iocb->u.nvme.rsp_pyld_len = 0;
+	} else if ((state_flags & SF_FCP_RSP_DMA)) {
+		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+	} else if (state_flags & SF_NVME_ERSP) {
+		uint32_t *inbuf, *outbuf;
+		uint16_t iter;
+
+		inbuf = (uint32_t *)&sts->nvme_ersp_data;
+		outbuf = (uint32_t *)fd->rspaddr;
+		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+		if (unlikely(iocb->u.nvme.rsp_pyld_len >
+		    sizeof(struct nvme_fc_ersp_iu))) {
+			if (ql_mask_match(ql_dbg_io)) {
+				WARN_ONCE(1, "Unexpected response payload length %u.\n",
+				    iocb->u.nvme.rsp_pyld_len);
+				ql_log(ql_log_warn, fcport->vha, 0x5100,
+				    "Unexpected response payload length %u.\n",
+				    iocb->u.nvme.rsp_pyld_len);
+			}
+			iocb->u.nvme.rsp_pyld_len =
+			    sizeof(struct nvme_fc_ersp_iu);
+		}
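+		/* Copy the ERSP payload, byte-swapping each 32-bit word. */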
+		iter = iocb->u.nvme.rsp_pyld_len >> 2;
+		for (; iter; iter--)
+			*outbuf++ = swab32(*inbuf++);
+	} else { /* unhandled case */
+		ql_log(ql_log_warn, fcport->vha, 0x503a,
+		    "NVME-%s error. Unhandled state_flags of %x\n",
+		    sp->name, state_flags);
+	}
+
+	fd->transferred_length = fd->payload_length -
+	    le32_to_cpu(sts->residual_len);
+
+	/*
+	 * If transport error then Failure (HBA rejects request)
+	 * otherwise transport will handle.
+	 */
+	if (sts->entry_status) {
+		ql_log(ql_log_warn, fcport->vha, 0x5038,
+		    "NVME-%s error - hdl=%x entry-status(%x).\n",
+		    sp->name, sp->handle, sts->entry_status);
+		ret = QLA_FUNCTION_FAILED;
+	} else {
+		switch (le16_to_cpu(sts->comp_status)) {
+		case CS_COMPLETE:
+			ret = 0;
+			break;
+
+		case CS_ABORTED:
+		case CS_RESET:
+		case CS_PORT_UNAVAILABLE:
+		case CS_PORT_LOGGED_OUT:
+		case CS_PORT_BUSY:
+			ql_log(ql_log_warn, fcport->vha, 0x5060,
+			    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+			    sp->name, sp->handle, sts->comp_status,
+			    le32_to_cpu(sts->residual_len), sts->ox_id);
+			fd->transferred_length = fd->payload_length;
+			ret = QLA_ABORTED;
+			break;
+
+		default:
+			ql_log(ql_log_warn, fcport->vha, 0x5060,
+			    "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+			    sp->name, sp->handle, sts->comp_status,
+			    le32_to_cpu(sts->residual_len), sts->ox_id);
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+	}
+	sp->done(sp, ret);
+}
+
+/**
+ * qla2x00_process_response_queue() - Process response queue entries.
+ * @rsp: response queue
+ */
+void
+qla2x00_process_response_queue(struct rsp_que *rsp)
+{
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha = rsp->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	sts_entry_t	*pkt;
+	uint16_t        handle_cnt;
+	uint16_t        cnt;
+
+	vha = pci_get_drvdata(ha->pdev);
+
+	if (!vha->flags.online)
+		return;
+
+	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+		pkt = (sts_entry_t *)rsp->ring_ptr;
+
+		rsp->ring_index++;
+		if (rsp->ring_index == rsp->length) {
+			rsp->ring_index = 0;
+			rsp->ring_ptr = rsp->ring;
+		} else {
+			rsp->ring_ptr++;
+		}
+
+		if (pkt->entry_status != 0) {
+			qla2x00_error_entry(vha, rsp, pkt);
+			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+			wmb();
+			continue;
+		}
+
+		switch (pkt->entry_type) {
+		case STATUS_TYPE:
+			qla2x00_status_entry(vha, rsp, pkt);
+			break;
+		case STATUS_TYPE_21:
+			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
+			for (cnt = 0; cnt < handle_cnt; cnt++) {
+				qla2x00_process_completed_request(vha, rsp->req,
+				    ((sts21_entry_t *)pkt)->handle[cnt]);
+			}
+			break;
+		case STATUS_TYPE_22:
+			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
+			for (cnt = 0; cnt < handle_cnt; cnt++) {
+				qla2x00_process_completed_request(vha, rsp->req,
+				    ((sts22_entry_t *)pkt)->handle[cnt]);
+			}
+			break;
+		case STATUS_CONT_TYPE:
+			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+			break;
+		case MBX_IOCB_TYPE:
+			qla2x00_mbx_iocb_entry(vha, rsp->req,
+			    (struct mbx_entry *)pkt);
+			break;
+		case CT_IOCB_TYPE:
+			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+			break;
+		default:
+			/* Type Not Supported. */
+			ql_log(ql_log_warn, vha, 0x504a,
+			    "Received unknown response pkt type %x "
+			    "entry status=%x.\n",
+			    pkt->entry_type, pkt->entry_status);
+			break;
+		}
+		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+		wmb();
+	}
+
+	/* Adjust ring index */
+	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
+}
+
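+/*
+ * Copy as much sense data as fits in this status IOCB; any remainder is
+ * delivered via status continuation entries, tracked through
+ * rsp->status_srb and the per-command sense pointer/length accessors.
+ */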
+static inline void
+qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+		     uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+	struct scsi_qla_host *vha = sp->vha;
+	struct scsi_cmnd *cp = GET_CMD_SP(sp);
+	uint32_t track_sense_len;
+
+	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+		sense_len = SCSI_SENSE_BUFFERSIZE;
+
+	SET_CMD_SENSE_LEN(sp, sense_len);
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+	track_sense_len = sense_len;
+
+	if (sense_len > par_sense_len)
+		sense_len = par_sense_len;
+
+	memcpy(cp->sense_buffer, sense_data, sense_len);
+
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+	track_sense_len -= sense_len;
+	SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+	if (track_sense_len != 0) {
+		rsp->status_srb = sp;
+		cp->result = res;
+	}
+
+	if (sense_len) {
+		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
+		    sp->vha->host_no, cp->device->id, cp->device->lun,
+		    cp);
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
+		    cp->sense_buffer, sense_len);
+	}
+}
+
+struct scsi_dif_tuple {
+	__be16 guard;       /* Checksum */
+	__be16 app_tag;         /* APPL identifier */
+	__be32 ref_tag;         /* Target LBA or indirect LBA */
+};
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA. In case of errors, we set the
+ * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
+ * to indicate to the kernel that the HBA detected error.
+ */
+static inline int
+qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
+{
+	struct scsi_qla_host *vha = sp->vha;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	uint8_t		*ap = &sts24->data[12];
+	uint8_t		*ep = &sts24->data[20];
+	uint32_t	e_ref_tag, a_ref_tag;
+	uint16_t	e_app_tag, a_app_tag;
+	uint16_t	e_guard, a_guard;
+
+	/*
+	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
+	 * would make guard field appear at offset 2
+	 */
+	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
+	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
+	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
+	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
+	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
+	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+
+	ql_dbg(ql_dbg_io, vha, 0x3023,
+	    "iocb(s) %p Returned STATUS.\n", sts24);
+
+	ql_dbg(ql_dbg_io, vha, 0x3024,
+	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
+	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
+	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
+	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
+	    a_app_tag, e_app_tag, a_guard, e_guard);
+
+	/*
+	 * Ignore sector if:
+	 * For type     3: ref & app tag is all 'f's
+	 * For type 0,1,2: app tag is all 'f's
+	 */
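+	/* Escape values: app tag 0xffff; ref tag 0xffffffff (type 3 only). */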
+	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
+	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
+	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
+		uint32_t blocks_done, resid;
+		sector_t lba_s = scsi_get_lba(cmd);
+
+		/* 2TB boundary case covered automatically with this */
+		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
+
+		resid = scsi_bufflen(cmd) - (blocks_done *
+		    cmd->device->sector_size);
+
+		scsi_set_resid(cmd, resid);
+		cmd->result = DID_OK << 16;
+
+		/* Update protection tag */
+		if (scsi_prot_sg_count(cmd)) {
+			uint32_t i, j = 0, k = 0, num_ent;
+			struct scatterlist *sg;
+			struct t10_pi_tuple *spt;
+
+			/* Patch the corresponding protection tags */
+			scsi_for_each_prot_sg(cmd, sg,
+			    scsi_prot_sg_count(cmd), i) {
+				num_ent = sg_dma_len(sg) / 8;
+				if (k + num_ent < blocks_done) {
+					k += num_ent;
+					continue;
+				}
+				j = blocks_done - k - 1;
+				k = blocks_done;
+				break;
+			}
+
+			if (k != blocks_done) {
+				ql_log(ql_log_warn, vha, 0x302f,
+				    "unexpected tag values tag:lba=%x:%llx)\n",
+				    e_ref_tag, (unsigned long long)lba_s);
+				return 1;
+			}
+
+			spt = page_address(sg_page(sg)) + sg->offset;
+			spt += j;
+
+			spt->app_tag = T10_PI_APP_ESCAPE;
+			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
+				spt->ref_tag = T10_PI_REF_ESCAPE;
+		}
+
+		return 0;
+	}
+
+	/* check guard */
+	if (e_guard != a_guard) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+		    0x10, 0x1);
+		set_driver_byte(cmd, DRIVER_SENSE);
+		set_host_byte(cmd, DID_ABORT);
+		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+		return 1;
+	}
+
+	/* check ref tag */
+	if (e_ref_tag != a_ref_tag) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+		    0x10, 0x3);
+		set_driver_byte(cmd, DRIVER_SENSE);
+		set_host_byte(cmd, DID_ABORT);
+		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+		return 1;
+	}
+
+	/* check appl tag */
+	if (e_app_tag != a_app_tag) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+		    0x10, 0x2);
+		set_driver_byte(cmd, DRIVER_SENSE);
+		set_host_byte(cmd, DID_ABORT);
+		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
+		return 1;
+	}
+
+	return 1;
+}
+
+static void
+qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+				  struct req_que *req, uint32_t index)
+{
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	uint16_t	comp_status;
+	uint16_t	scsi_status;
+	uint16_t thread_id;
+	uint32_t rval = EXT_STATUS_OK;
+	struct bsg_job *bsg_job = NULL;
+	struct fc_bsg_request *bsg_request;
+	struct fc_bsg_reply *bsg_reply;
+	sts_entry_t *sts;
+	struct sts_entry_24xx *sts24;
+	sts = (sts_entry_t *) pkt;
+	sts24 = (struct sts_entry_24xx *) pkt;
+
+	/* Validate handle. */
+	if (index >= req->num_outstanding_cmds) {
+		ql_log(ql_log_warn, vha, 0x70af,
+		    "Invalid SCSI completion handle 0x%x.\n", index);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		return;
+	}
+
+	sp = req->outstanding_cmds[index];
+	if (!sp) {
+		ql_log(ql_log_warn, vha, 0x70b0,
+		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
+		    req->id, index);
+
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		return;
+	}
+
+	/* Free outstanding command slot. */
+	req->outstanding_cmds[index] = NULL;
+	bsg_job = sp->u.bsg_job;
+	bsg_request = bsg_job->request;
+	bsg_reply = bsg_job->reply;
+
+	if (IS_FWI2_CAPABLE(ha)) {
+		comp_status = le16_to_cpu(sts24->comp_status);
+		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+	} else {
+		comp_status = le16_to_cpu(sts->comp_status);
+		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+	}
+
+	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+	switch (comp_status) {
+	case CS_COMPLETE:
+		if (scsi_status == 0) {
+			bsg_reply->reply_payload_rcv_len =
+					bsg_job->reply_payload.payload_len;
+			vha->qla_stats.input_bytes +=
+				bsg_reply->reply_payload_rcv_len;
+			vha->qla_stats.input_requests++;
+			rval = EXT_STATUS_OK;
+		}
+		goto done;
+
+	case CS_DATA_OVERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b1,
+		    "Command completed with data overrun thread_id=%d\n",
+		    thread_id);
+		rval = EXT_STATUS_DATA_OVERRUN;
+		break;
+
+	case CS_DATA_UNDERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b2,
+		    "Command completed with data underrun thread_id=%d\n",
+		    thread_id);
+		rval = EXT_STATUS_DATA_UNDERRUN;
+		break;
+	case CS_BIDIR_RD_OVERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b3,
+		    "Command completed with read data overrun thread_id=%d\n",
+		    thread_id);
+		rval = EXT_STATUS_DATA_OVERRUN;
+		break;
+
+	case CS_BIDIR_RD_WR_OVERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b4,
+		    "Command completed with read and write data overrun "
+		    "thread_id=%d\n", thread_id);
+		rval = EXT_STATUS_DATA_OVERRUN;
+		break;
+
+	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b5,
+		    "Command completed with read data over and write data "
+		    "underrun thread_id=%d\n", thread_id);
+		rval = EXT_STATUS_DATA_OVERRUN;
+		break;
+
+	case CS_BIDIR_RD_UNDERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b6,
+		    "Command completed with read data underrun "
+		    "thread_id=%d\n", thread_id);
+		rval = EXT_STATUS_DATA_UNDERRUN;
+		break;
+
+	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b7,
+		    "Command completed with read data under and write data "
+		    "overrun thread_id=%d\n", thread_id);
+		rval = EXT_STATUS_DATA_UNDERRUN;
+		break;
+
+	case CS_BIDIR_RD_WR_UNDERRUN:
+		ql_dbg(ql_dbg_user, vha, 0x70b8,
+		    "Command completed with read and write data underrun "
+		    "thread_id=%d\n", thread_id);
+		rval = EXT_STATUS_DATA_UNDERRUN;
+		break;
+
+	case CS_BIDIR_DMA:
+		ql_dbg(ql_dbg_user, vha, 0x70b9,
+		    "Command completed with data DMA error thread_id=%d\n",
+		    thread_id);
+		rval = EXT_STATUS_DMA_ERR;
+		break;
+
+	case CS_TIMEOUT:
+		ql_dbg(ql_dbg_user, vha, 0x70ba,
+		    "Command completed with timeout thread_id=%d\n",
+		    thread_id);
+		rval = EXT_STATUS_TIMEOUT;
+		break;
+	default:
+		ql_dbg(ql_dbg_user, vha, 0x70bb,
+		    "Command completed with completion status=0x%x "
+		    "thread_id=%d\n", comp_status, thread_id);
+		rval = EXT_STATUS_ERR;
+		break;
+	}
+	bsg_reply->reply_payload_rcv_len = 0;
+
+done:
+	/* Return the vendor specific reply to API */
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+	/*
+	 * Always return DID_OK; bsg will send the vendor specific
+	 * response in this case only.
+	 */
+	sp->done(sp, DID_OK << 16);
+
+}
+
+/**
+ * qla2x00_status_entry() - Process a Status IOCB entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ */
+static void
+qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+	srb_t		*sp;
+	fc_port_t	*fcport;
+	struct scsi_cmnd *cp;
+	sts_entry_t *sts;
+	struct sts_entry_24xx *sts24;
+	uint16_t	comp_status;
+	uint16_t	scsi_status;
+	uint16_t	ox_id;
+	uint8_t		lscsi_status;
+	int32_t		resid;
+	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
+	    fw_resid_len;
+	uint8_t		*rsp_info, *sense_data;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t handle;
+	uint16_t que;
+	struct req_que *req;
+	int logit = 1;
+	int res = 0;
+	uint16_t state_flags = 0;
+	uint16_t retry_delay = 0;
+
+	sts = (sts_entry_t *) pkt;
+	sts24 = (struct sts_entry_24xx *) pkt;
+	if (IS_FWI2_CAPABLE(ha)) {
+		comp_status = le16_to_cpu(sts24->comp_status);
+		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
+		state_flags = le16_to_cpu(sts24->state_flags);
+	} else {
+		comp_status = le16_to_cpu(sts->comp_status);
+		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+	}
+	handle = (uint32_t) LSW(sts->handle);
+	que = MSW(sts->handle);
+	req = ha->req_q_map[que];
+
+	/* Check for invalid queue pointer */
+	if (req == NULL ||
+	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+		ql_dbg(ql_dbg_io, vha, 0x3059,
+		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+		    "que=%u.\n", sts->handle, req, que);
+		return;
+	}
+
+	/* Validate handle. */
+	if (handle < req->num_outstanding_cmds) {
+		sp = req->outstanding_cmds[handle];
+		if (!sp) {
+			ql_dbg(ql_dbg_io, vha, 0x3075,
+			    "%s(%ld): Already returned command for status handle (0x%x).\n",
+			    __func__, vha->host_no, sts->handle);
+			return;
+		}
+	} else {
+		ql_dbg(ql_dbg_io, vha, 0x3017,
+		    "Invalid status handle, out of range (0x%x).\n",
+		    sts->handle);
+
+		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+			if (IS_P3P_TYPE(ha))
+				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+			else
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		return;
+	}
+
+	if (sp->cmd_type != TYPE_SRB) {
+		req->outstanding_cmds[handle] = NULL;
+		ql_dbg(ql_dbg_io, vha, 0x3015,
+		    "Unknown sp->cmd_type %x %p).\n",
+		    sp->cmd_type, sp);
+		return;
+	}
+
+	/* NVME completion. */
+	if (sp->type == SRB_NVME_CMD) {
+		qla24xx_nvme_iocb_entry(vha, req, pkt);
+		return;
+	}
+
+	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
+		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
+		return;
+	}
+
+	/* Task Management completion. */
+	if (sp->type == SRB_TM_CMD) {
+		qla24xx_tm_iocb_entry(vha, req, pkt);
+		return;
+	}
+
+	/* Fast path completion. */
+	if (comp_status == CS_COMPLETE && scsi_status == 0) {
+		qla2x00_process_completed_request(vha, req, handle);
+
+		return;
+	}
+
+	req->outstanding_cmds[handle] = NULL;
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_dbg(ql_dbg_io, vha, 0x3018,
+		    "Command already returned (0x%x/%p).\n",
+		    sts->handle, sp);
+
+		return;
+	}
+
+	lscsi_status = scsi_status & STATUS_MASK;
+
+	fcport = sp->fcport;
+
+	ox_id = 0;
+	sense_len = par_sense_len = rsp_info_len = resid_len =
+	    fw_resid_len = 0;
+	if (IS_FWI2_CAPABLE(ha)) {
+		if (scsi_status & SS_SENSE_LEN_VALID)
+			sense_len = le32_to_cpu(sts24->sense_len);
+		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
+		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+			resid_len = le32_to_cpu(sts24->rsp_residual_count);
+		if (comp_status == CS_DATA_UNDERRUN)
+			fw_resid_len = le32_to_cpu(sts24->residual_len);
+		rsp_info = sts24->data;
+		sense_data = sts24->data;
+		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
+		ox_id = le16_to_cpu(sts24->ox_id);
+		par_sense_len = sizeof(sts24->data);
+		/* Valid values of the retry delay timer are 0x1-0xffef */
+		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
+			retry_delay = sts24->retry_delay & 0x3fff;
+			ql_dbg(ql_dbg_io, sp->vha, 0x3033,
+			    "%s: scope=%#x retry_delay=%#x\n", __func__,
+			    sts24->retry_delay >> 14, retry_delay);
+		}
+	} else {
+		if (scsi_status & SS_SENSE_LEN_VALID)
+			sense_len = le16_to_cpu(sts->req_sense_length);
+		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
+			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
+		resid_len = le32_to_cpu(sts->residual_length);
+		rsp_info = sts->rsp_info;
+		sense_data = sts->req_sense_data;
+		par_sense_len = sizeof(sts->req_sense_data);
+	}
+
+	/* Check for any FCP transport errors. */
+	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
+		/* Sense data lies beyond any FCP RESPONSE data. */
+		if (IS_FWI2_CAPABLE(ha)) {
+			sense_data += rsp_info_len;
+			par_sense_len -= rsp_info_len;
+		}
+		if (rsp_info_len > 3 && rsp_info[3]) {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
+			    "FCP I/O protocol failure (0x%x/0x%x).\n",
+			    rsp_info_len, rsp_info[3]);
+
+			res = DID_BUS_BUSY << 16;
+			goto out;
+		}
+	}
+
+	/* Check for overrun. */
+	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
+	    scsi_status & SS_RESIDUAL_OVER)
+		comp_status = CS_DATA_OVERRUN;
+
+	/*
+	 * Check retry_delay_timer value if we receive a busy or
+	 * queue full.
+	 */
+	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
+	    lscsi_status == SAM_STAT_BUSY)
+		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+
+	/*
+	 * Based on Host and scsi status generate status code for Linux
+	 */
+	switch (comp_status) {
+	case CS_COMPLETE:
+	case CS_QUEUE_FULL:
+		if (scsi_status == 0) {
+			res = DID_OK << 16;
+			break;
+		}
+		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
+			resid = resid_len;
+			scsi_set_resid(cp, resid);
+
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			     cp->underflow)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
+				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16;
+				break;
+			}
+		}
+		res = DID_OK << 16 | lscsi_status;
+
+		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
+			    "QUEUE FULL detected.\n");
+			break;
+		}
+		logit = 0;
+		if (lscsi_status != SS_CHECK_CONDITION)
+			break;
+
+		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+		if (!(scsi_status & SS_SENSE_LEN_VALID))
+			break;
+
+		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+		    rsp, res);
+		break;
+
+	case CS_DATA_UNDERRUN:
+		/* Use F/W calculated residual length. */
+		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
+		scsi_set_resid(cp, resid);
+		if (scsi_status & SS_RESIDUAL_UNDER) {
+			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
+				    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16 | lscsi_status;
+				goto check_scsi_status;
+			}
+
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			    cp->underflow)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
+				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16;
+				break;
+			}
+		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+			    lscsi_status != SAM_STAT_BUSY) {
+			/*
+			 * scsi status of task set and busy are considered to be
+			 * task not completed.
+			 */
+
+			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
+			    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+			    resid, scsi_bufflen(cp));
+
+			res = DID_ERROR << 16 | lscsi_status;
+			goto check_scsi_status;
+		} else {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
+			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+			    scsi_status, lscsi_status);
+		}
+
+		res = DID_OK << 16 | lscsi_status;
+		logit = 0;
+
+check_scsi_status:
+		/*
+		 * Check to see if SCSI Status is non zero. If so report SCSI
+		 * Status.
+		 */
+		if (lscsi_status != 0) {
+			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
+				    "QUEUE FULL detected.\n");
+				logit = 1;
+				break;
+			}
+			if (lscsi_status != SS_CHECK_CONDITION)
+				break;
+
+			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+			if (!(scsi_status & SS_SENSE_LEN_VALID))
+				break;
+
+			qla2x00_handle_sense(sp, sense_data, par_sense_len,
+			    sense_len, rsp, res);
+		}
+		break;
+
+	case CS_PORT_LOGGED_OUT:
+	case CS_PORT_CONFIG_CHG:
+	case CS_PORT_BUSY:
+	case CS_INCOMPLETE:
+	case CS_PORT_UNAVAILABLE:
+	case CS_TIMEOUT:
+	case CS_RESET:
+
+		/*
+		 * We are going to have the fc class block the rport
+		 * while we try to recover so instruct the mid layer
+		 * to requeue until the class decides how to handle this.
+		 */
+		res = DID_TRANSPORT_DISRUPTED << 16;
+
+		if (comp_status == CS_TIMEOUT) {
+			if (IS_FWI2_CAPABLE(ha))
+				break;
+			else if ((le16_to_cpu(sts->status_flags) &
+			    SF_LOGOUT_SENT) == 0)
+				break;
+		}
+
+		if (atomic_read(&fcport->state) == FCS_ONLINE) {
+			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+				"Port to be marked lost on fcport=%02x%02x%02x, current "
+				"port state= %s comp_status %x.\n", fcport->d_id.b.domain,
+				fcport->d_id.b.area, fcport->d_id.b.al_pa,
+				port_state_str[atomic_read(&fcport->state)],
+				comp_status);
+
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+			qlt_schedule_sess_for_deletion_lock(fcport);
+		}
+
+		break;
+
+	case CS_ABORTED:
+		res = DID_RESET << 16;
+		break;
+
+	case CS_DIF_ERROR:
+		logit = qla2x00_handle_dif_error(sp, sts24);
+		res = cp->result;
+		break;
+
+	case CS_TRANSPORT:
+		res = DID_ERROR << 16;
+
+		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
+			break;
+
+		if (state_flags & BIT_4)
+			scmd_printk(KERN_WARNING, cp,
+			    "Unsupported device '%s' found.\n",
+			    cp->device->vendor);
+		break;
+
+	default:
+		res = DID_ERROR << 16;
+		break;
+	}
+
+out:
+	if (logit)
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
+		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
+		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
+		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+		    comp_status, scsi_status, res, vha->host_no,
+		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+		    resid_len, fw_resid_len, sp, cp);
+
+	if (rsp->status_srb == NULL)
+		sp->done(sp, res);
+}
+
+/**
+ * qla2x00_status_cont_entry() - Process a Status Continuations entry.
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+	uint8_t	sense_sz = 0;
+	struct qla_hw_data *ha = rsp->hw;
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+	srb_t *sp = rsp->status_srb;
+	struct scsi_cmnd *cp;
+	uint32_t sense_len;
+	uint8_t *sense_ptr;
+
+	if (!sp || !GET_CMD_SENSE_LEN(sp))
+		return;
+
+	sense_len = GET_CMD_SENSE_LEN(sp);
+	sense_ptr = GET_CMD_SENSE_PTR(sp);
+
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_log(ql_log_warn, vha, 0x3025,
+		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+		rsp->status_srb = NULL;
+		return;
+	}
+
+	if (sense_len > sizeof(pkt->data))
+		sense_sz = sizeof(pkt->data);
+	else
+		sense_sz = sense_len;
+
+	/* Move sense data. */
+	if (IS_FWI2_CAPABLE(ha))
+		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
+	memcpy(sense_ptr, pkt->data, sense_sz);
+	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+		sense_ptr, sense_sz);
+
+	sense_len -= sense_sz;
+	sense_ptr += sense_sz;
+
+	SET_CMD_SENSE_PTR(sp, sense_ptr);
+	SET_CMD_SENSE_LEN(sp, sense_len);
+
+	/* Place command on done queue. */
+	if (sense_len == 0) {
+		rsp->status_srb = NULL;
+		sp->done(sp, cp->result);
+	}
+}
+
+/**
+ * qla2x00_error_entry() - Process an error entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ *
+ * Returns: 1=allow further error analysis, 0=no additional error analysis.
+ */
+static int
+qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	const char func[] = "ERROR-IOCB";
+	uint16_t que = MSW(pkt->handle);
+	struct req_que *req = NULL;
+	int res = DID_ERROR << 16;
+
+	ql_dbg(ql_dbg_async, vha, 0x502a,
+	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
+	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
+
+	if (que >= ha->max_req_queues || !ha->req_q_map[que])
+		goto fatal;
+
+	req = ha->req_q_map[que];
+
+	if (pkt->entry_status & RF_BUSY)
+		res = DID_BUS_BUSY << 16;
+
+	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
+		return 0;
+
+	switch (pkt->entry_type) {
+	case NOTIFY_ACK_TYPE:
+	case STATUS_TYPE:
+	case STATUS_CONT_TYPE:
+	case LOGINOUT_PORT_IOCB_TYPE:
+	case CT_IOCB_TYPE:
+	case ELS_IOCB_TYPE:
+	case ABORT_IOCB_TYPE:
+	case MBX_IOCB_TYPE:
+	default:
+		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+		if (sp) {
+			sp->done(sp, res);
+			return 0;
+		}
+		break;
+
+	case ABTS_RESP_24XX:
+	case CTIO_TYPE7:
+	case CTIO_CRC2:
+		return 1;
+	}
+fatal:
+	ql_log(ql_log_warn, vha, 0x5030,
+	    "Error entry - invalid handle/queue (%04x).\n", que);
+	return 0;
+}
+
+/**
+ * qla24xx_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+static void
+qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+	uint16_t	cnt;
+	uint32_t	mboxes;
+	uint16_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	/* Read all mbox registers? */
+	WARN_ON_ONCE(ha->mbx_count > 32);
+	mboxes = (1ULL << ha->mbx_count) - 1;
+	if (!ha->mcp)
+		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
+	else
+		mboxes = ha->mcp->in_mb;
+
+	/* Load return mailbox registers. */
+	ha->flags.mbox_int = 1;
+	ha->mailbox_out[0] = mb0;
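+	/* Mailbox0 was taken from host status; shift so BIT_0 tracks mailbox1. */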
+	mboxes >>= 1;
+	wptr = (uint16_t __iomem *)&reg->mailbox1;
+
+	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+		if (mboxes & BIT_0)
+			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+
+		mboxes >>= 1;
+		wptr++;
+	}
+}
+
+static void
+qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+	struct abort_entry_24xx *pkt)
+{
+	const char func[] = "ABT_IOCB";
+	srb_t *sp;
+	struct srb_iocb *abt;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	abt = &sp->u.iocb_cmd;
+	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
+	sp->done(sp, 0);
+}
+
+void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
+    struct pt_ls4_request *pkt, struct req_que *req)
+{
+	srb_t *sp;
+	const char func[] = "LS4_IOCB";
+	uint16_t comp_status;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	comp_status = le16_to_cpu(pkt->status);
+	sp->done(sp, comp_status);
+}
+
+/**
+ * qla24xx_process_response_queue() - Process response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ */
+void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+	struct rsp_que *rsp)
+{
+	struct sts_entry_24xx *pkt;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->flags.fw_started)
+		return;
+
+	if (rsp->qpair->cpuid != smp_processor_id())
+		qla_cpu_update(rsp->qpair, smp_processor_id());
+
+	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+
+		rsp->ring_index++;
+		if (rsp->ring_index == rsp->length) {
+			rsp->ring_index = 0;
+			rsp->ring_ptr = rsp->ring;
+		} else {
+			rsp->ring_ptr++;
+		}
+
+		if (pkt->entry_status != 0) {
+			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
+				goto process_err;
+
+			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+			wmb();
+			continue;
+		}
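+		/*
+		 * Error entries of target-mode types (ABTS/CTIO) return 1
+		 * above and still need the type-specific handling below.
+		 */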
+process_err:
+
+		switch (pkt->entry_type) {
+		case STATUS_TYPE:
+			qla2x00_status_entry(vha, rsp, pkt);
+			break;
+		case STATUS_CONT_TYPE:
+			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+			break;
+		case VP_RPT_ID_IOCB_TYPE:
+			qla24xx_report_id_acquisition(vha,
+			    (struct vp_rpt_id_entry_24xx *)pkt);
+			break;
+		case LOGINOUT_PORT_IOCB_TYPE:
+			qla24xx_logio_entry(vha, rsp->req,
+			    (struct logio_entry_24xx *)pkt);
+			break;
+		case CT_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+			break;
+		case ELS_IOCB_TYPE:
+			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+			break;
+		case ABTS_RECV_24XX:
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+				/* ensure that the ATIO queue is empty */
+				qlt_handle_abts_recv(vha, rsp,
+				    (response_t *)pkt);
+				break;
+			} else {
+				/* drop through */
+				qlt_24xx_process_atio_queue(vha, 1);
+			}
+		case ABTS_RESP_24XX:
+		case CTIO_TYPE7:
+		case CTIO_CRC2:
+			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
+			break;
+		case PT_LS4_REQUEST:
+			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
+			    rsp->req);
+			break;
+		case NOTIFY_ACK_TYPE:
+			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
+				qlt_response_pkt_all_vps(vha, rsp,
+				    (response_t *)pkt);
+			else
+				qla24xxx_nack_iocb_entry(vha, rsp->req,
+					(struct nack_to_isp *)pkt);
+			break;
+		case MARKER_TYPE:
+			/*
+			 * Do nothing in this case; this check prevents the
+			 * packet from falling into the default case.
+			 */
+			break;
+		case ABORT_IOCB_TYPE:
+			qla24xx_abort_iocb_entry(vha, rsp->req,
+			    (struct abort_entry_24xx *)pkt);
+			break;
+		case MBX_IOCB_TYPE:
+			qla24xx_mbx_iocb_entry(vha, rsp->req,
+			    (struct mbx_24xx_entry *)pkt);
+			break;
+		default:
+			/* Type Not Supported. */
+			ql_dbg(ql_dbg_async, vha, 0x5042,
+			    "Received unknown response pkt type %x "
+			    "entry status=%x.\n",
+			    pkt->entry_type, pkt->entry_status);
+			break;
+		}
+		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
+		wmb();
+	}
+
+	/* Adjust ring index */
+	if (IS_P3P_TYPE(ha)) {
+		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+	} else {
+		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+	}
+}
+
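+/*
+ * Peek at the RISC's diagnostic state through the iobase window registers;
+ * used when the RISC reports itself paused, before dumping firmware.
+ */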
+static void
+qla2xxx_check_risc_status(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint32_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
+		return;
+
+	rval = QLA_SUCCESS;
+	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
+	RD_REG_DWORD(&reg->iobase_addr);
+	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt) {
+			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+			udelay(10);
+		} else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+	if (rval == QLA_SUCCESS)
+		goto next_test;
+
+	rval = QLA_SUCCESS;
+	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt) {
+			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+			udelay(10);
+		} else
+			rval = QLA_FUNCTION_TIMEOUT;
+	}
+	if (rval != QLA_SUCCESS)
+		goto done;
+
+next_test:
+	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
+		ql_log(ql_log_info, vha, 0x504c,
+		    "Additional code -- 0x55AA.\n");
+
+done:
+	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
+	RD_REG_DWORD(&reg->iobase_window);
+}
+
+/**
+ * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla24xx_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct device_reg_24xx __iomem *reg;
+	int		status;
+	unsigned long	iter;
+	uint32_t	stat;
+	uint32_t	hccr;
+	uint16_t	mb[8];
+	struct rsp_que *rsp;
+	unsigned long	flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x5059,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->isp24;
+	status = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
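+	/* Bound the work done in one interrupt: service at most 50 events. */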
+	for (iter = 50; iter--; ) {
+		stat = RD_REG_DWORD(&reg->host_status);
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
+			break;
+		if (stat & HSRX_RISC_PAUSED) {
+			if (unlikely(pci_channel_offline(ha->pdev)))
+				break;
+
+			hccr = RD_REG_DWORD(&reg->hccr);
+
+			ql_log(ql_log_warn, vha, 0x504b,
+			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
+			    hccr);
+
+			qla2xxx_check_risc_status(vha);
+
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			break;
+		} else if ((stat & HSRX_RISC_INT) == 0)
+			break;
+
+		switch (stat & 0xff) {
+		case INTR_ROM_MB_SUCCESS:
+		case INTR_ROM_MB_FAILED:
+		case INTR_MB_SUCCESS:
+		case INTR_MB_FAILED:
+			qla24xx_mbx_completion(vha, MSW(stat));
+			status |= MBX_INTERRUPT;
+
+			break;
+		case INTR_ASYNC_EVENT:
+			mb[0] = MSW(stat);
+			mb[1] = RD_REG_WORD(&reg->mailbox1);
+			mb[2] = RD_REG_WORD(&reg->mailbox2);
+			mb[3] = RD_REG_WORD(&reg->mailbox3);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		case INTR_RSP_QUE_UPDATE:
+		case INTR_RSP_QUE_UPDATE_83XX:
+			qla24xx_process_response_queue(vha, rsp);
+			break;
+		case INTR_ATIO_QUE_UPDATE:{
+			unsigned long flags2;
+			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+			qlt_24xx_process_atio_queue(vha, 1);
+			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+			break;
+		}
+		case INTR_ATIO_RSP_QUE_UPDATE: {
+			unsigned long flags2;
+			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+			qlt_24xx_process_atio_queue(vha, 1);
+			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
+			qla24xx_process_response_queue(vha, rsp);
+			break;
+		}
+		default:
+			ql_dbg(ql_dbg_async, vha, 0x504f,
+			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
+			break;
+		}
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		RD_REG_DWORD_RELAXED(&reg->hccr);
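+		/*
+		 * Early 83xx silicon (revision 1) appears to need a short
+		 * settle time after clearing the RISC interrupt.
+		 */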
+		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
+			ndelay(3500);
+	}
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+qla24xx_msix_rsp_q(int irq, void *dev_id)
+{
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_24xx __iomem *reg;
+	struct scsi_qla_host *vha;
+	unsigned long flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x505a,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+	reg = &ha->iobase->isp24;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	vha = pci_get_drvdata(ha->pdev);
+	qla24xx_process_response_queue(vha, rsp);
+	if (!ha->flags.disable_msix_handshake) {
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		RD_REG_DWORD_RELAXED(&reg->hccr);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+qla24xx_msix_default(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_24xx __iomem *reg;
+	int		status;
+	uint32_t	stat;
+	uint32_t	hccr;
+	uint16_t	mb[8];
+	unsigned long flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x505c,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+	reg = &ha->iobase->isp24;
+	status = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	do {
+		stat = RD_REG_DWORD(&reg->host_status);
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
+			break;
+		if (stat & HSRX_RISC_PAUSED) {
+			if (unlikely(pci_channel_offline(ha->pdev)))
+				break;
+
+			hccr = RD_REG_DWORD(&reg->hccr);
+
+			ql_log(ql_log_info, vha, 0x5050,
+			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
+			    hccr);
+
+			qla2xxx_check_risc_status(vha);
+
+			ha->isp_ops->fw_dump(vha, 1);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			break;
+		} else if ((stat & HSRX_RISC_INT) == 0)
+			break;
+
+		switch (stat & 0xff) {
+		case INTR_ROM_MB_SUCCESS:
+		case INTR_ROM_MB_FAILED:
+		case INTR_MB_SUCCESS:
+		case INTR_MB_FAILED:
+			qla24xx_mbx_completion(vha, MSW(stat));
+			status |= MBX_INTERRUPT;
+
+			break;
+		case INTR_ASYNC_EVENT:
+			mb[0] = MSW(stat);
+			mb[1] = RD_REG_WORD(&reg->mailbox1);
+			mb[2] = RD_REG_WORD(&reg->mailbox2);
+			mb[3] = RD_REG_WORD(&reg->mailbox3);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		case INTR_RSP_QUE_UPDATE:
+		case INTR_RSP_QUE_UPDATE_83XX:
+			qla24xx_process_response_queue(vha, rsp);
+			break;
+		case INTR_ATIO_QUE_UPDATE: {
+			unsigned long flags2;
+			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+			qlt_24xx_process_atio_queue(vha, 1);
+			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+			break;
+		}
+		case INTR_ATIO_RSP_QUE_UPDATE: {
+			unsigned long flags2;
+			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
+			qlt_24xx_process_atio_queue(vha, 1);
+			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+
+			qla24xx_process_response_queue(vha, rsp);
+			break;
+		}
+		default:
+			ql_dbg(ql_dbg_async, vha, 0x5051,
+			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
+			break;
+		}
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+	} while (0);
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+	struct qla_hw_data *ha;
+	struct qla_qpair *qpair;
+	struct device_reg_24xx __iomem *reg;
+	unsigned long flags;
+
+	qpair = dev_id;
+	if (!qpair) {
+		ql_log(ql_log_info, NULL, 0x505b,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = qpair->hw;
+
+	/* Clear the interrupt, if enabled, for this response queue */
+	if (unlikely(!ha->flags.disable_msix_handshake)) {
+		reg = &ha->iobase->isp24;
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	queue_work(ha->wq, &qpair->q_work);
+
+	return IRQ_HANDLED;
+}
+
+/* Interrupt handling helpers. */
+
+struct qla_init_msix_entry {
+	const char *name;
+	irq_handler_t handler;
+};
+
+static const struct qla_init_msix_entry msix_entries[] = {
+	{ "default", qla24xx_msix_default },
+	{ "rsp_q", qla24xx_msix_rsp_q },
+	{ "atio_q", qla83xx_msix_atio_q },
+	{ "qpair_multiq", qla2xxx_msix_rsp_q },
+};
+
+static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
+	{ "qla2xxx (default)", qla82xx_msix_default },
+	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
+};
+
+static int
+qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+	int i, ret;
+	struct qla_msix_entry *qentry;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	int min_vecs = QLA_BASE_VECTORS;
+	struct irq_affinity desc = {
+		.pre_vectors = QLA_BASE_VECTORS,
+	};
+
+	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+		desc.pre_vectors++;
+		min_vecs++;
+	}
+
+	if (USER_CTRL_IRQ(ha)) {
+		/* user wants to control IRQ setting for target mode */
+		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
+		    ha->msix_count, PCI_IRQ_MSIX);
+	} else
+		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
+		    ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+		    &desc);
+
+	if (ret < 0) {
+		ql_log(ql_log_fatal, vha, 0x00c7,
+		    "MSI-X: Failed to enable support, "
+		    "giving   up -- %d/%d.\n",
+		    ha->msix_count, ret);
+		goto msix_out;
+	} else if (ret < ha->msix_count) {
+		ql_log(ql_log_info, vha, 0x00c6,
+		    "MSI-X: Using %d vectors\n", ret);
+		ha->msix_count = ret;
+		/* Recalculate queue values */
+		if (ha->mqiobase && ql2xmqsupport) {
+			ha->max_req_queues = ha->msix_count - 1;
+
+			/* ATIOQ needs 1 vector. That's 1 less QPair */
+			if (QLA_TGT_MODE_ENABLED())
+				ha->max_req_queues--;
+
+			ha->max_rsp_queues = ha->max_req_queues;
+
+			ha->max_qpairs = ha->max_req_queues - 1;
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
+		}
+	}
+	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+				ha->msix_count, GFP_KERNEL);
+	if (!ha->msix_entries) {
+		ql_log(ql_log_fatal, vha, 0x00c8,
+		    "Failed to allocate memory for ha->msix_entries.\n");
+		ret = -ENOMEM;
+		goto free_irqs;
+	}
+	ha->flags.msix_enabled = 1;
+
+	for (i = 0; i < ha->msix_count; i++) {
+		qentry = &ha->msix_entries[i];
+		qentry->vector = pci_irq_vector(ha->pdev, i);
+		qentry->entry = i;
+		qentry->have_irq = 0;
+		qentry->in_use = 0;
+		qentry->handle = NULL;
+	}
+
+	/* Enable MSI-X vectors for the base queue */
+	for (i = 0; i < QLA_BASE_VECTORS; i++) {
+		qentry = &ha->msix_entries[i];
+		qentry->handle = rsp;
+		rsp->msix = qentry;
+		scnprintf(qentry->name, sizeof(qentry->name),
+		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
+		if (IS_P3P_TYPE(ha))
+			ret = request_irq(qentry->vector,
+				qla82xx_msix_entries[i].handler,
+				0, qla82xx_msix_entries[i].name, rsp);
+		else
+			ret = request_irq(qentry->vector,
+				msix_entries[i].handler,
+				0, qentry->name, rsp);
+		if (ret)
+			goto msix_register_fail;
+		qentry->have_irq = 1;
+		qentry->in_use = 1;
+	}
+
+	/*
+	 * If target mode is enabled, also request the vector for the ATIO
+	 * queue.
+	 */
+	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
+		rsp->msix = qentry;
+		qentry->handle = rsp;
+		scnprintf(qentry->name, sizeof(qentry->name),
+		    "qla2xxx%lu_%s", vha->host_no,
+		    msix_entries[QLA_ATIO_VECTOR].name);
+		qentry->in_use = 1;
+		ret = request_irq(qentry->vector,
+			msix_entries[QLA_ATIO_VECTOR].handler,
+			0, qentry->name, rsp);
+		qentry->have_irq = 1;
+	}
+
+msix_register_fail:
+	if (ret) {
+		ql_log(ql_log_fatal, vha, 0x00cb,
+		    "MSI-X: unable to register handler -- %x/%d.\n",
+		    qentry->vector, ret);
+		qla2x00_free_irqs(vha);
+		ha->mqenable = 0;
+		goto msix_out;
+	}
+
+	/* Enable MSI-X vector for response queue update for queue 0 */
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		if (ha->msixbase && ha->mqiobase &&
+		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+		     ql2xmqsupport))
+			ha->mqenable = 1;
+	} else if (ha->mqiobase &&
+	    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+	     ql2xmqsupport))
+		ha->mqenable = 1;
+	ql_dbg(ql_dbg_multiq, vha, 0xc005,
+	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
+	ql_dbg(ql_dbg_init, vha, 0x0055,
+	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
+
+msix_out:
+	return ret;
+
+free_irqs:
+	pci_free_irq_vectors(ha->pdev);
+	goto msix_out;
+}
+
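+/*
+ * For reference, the "reserve the base vectors, spread the rest" pattern
+ * used by qla24xx_enable_msix() above boils down to the generic PCI IRQ
+ * API. A hedged, illustrative sketch for a hypothetical device (not
+ * driver code):
+ *
+ *	struct irq_affinity desc = { .pre_vectors = QLA_BASE_VECTORS };
+ *	int nvec = pci_alloc_irq_vectors_affinity(pdev, QLA_BASE_VECTORS,
+ *	    maxvec, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
+ *
+ * On success, vectors 0..pre_vectors-1 keep default affinity (for the
+ * default/response handlers), the remainder are spread across CPUs, and
+ * pci_irq_vector(pdev, i) translates a vector index into an IRQ number
+ * suitable for request_irq().
+ */
+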
+int
+qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	device_reg_t *reg = ha->iobase;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	/* If possible, enable MSI-X. */
+	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
+		goto skip_msi;
+
+	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+		(ha->pdev->subsystem_device == 0x7040 ||
+		ha->pdev->subsystem_device == 0x7041 ||
+		ha->pdev->subsystem_device == 0x1705)) {
+		ql_log(ql_log_warn, vha, 0x0034,
+		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
+			ha->pdev->subsystem_vendor,
+			ha->pdev->subsystem_device);
+		goto skip_msi;
+	}
+
+	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
+		ql_log(ql_log_warn, vha, 0x0035,
+		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
+		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
+		goto skip_msix;
+	}
+
+	ret = qla24xx_enable_msix(ha, rsp);
+	if (!ret) {
+		ql_dbg(ql_dbg_init, vha, 0x0036,
+		    "MSI-X: Enabled (0x%X, 0x%X).\n",
+		    ha->chip_revision, ha->fw_attributes);
+		goto clear_risc_ints;
+	}
+
+skip_msix:
+
+	ql_log(ql_log_info, vha, 0x0037,
+	    "Falling back-to MSI mode -- ret=%d.\n", ret);
+
+	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+	    !IS_QLA27XX(ha))
+		goto skip_msi;
+
+	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+	if (ret > 0) {
+		ql_dbg(ql_dbg_init, vha, 0x0038,
+		    "MSI: Enabled.\n");
+		ha->flags.msi_enabled = 1;
+	} else
+		ql_log(ql_log_warn, vha, 0x0039,
+		    "Falling back-to INTa mode -- ret=%d.\n", ret);
+skip_msi:
+
+	/* Skip INTx on ISP82xx. */
+	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
+	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
+	    QLA2XXX_DRIVER_NAME, rsp);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0x003a,
+		    "Failed to reserve interrupt %d already in use.\n",
+		    ha->pdev->irq);
+		goto fail;
+	} else if (!ha->flags.msi_enabled) {
+		ql_dbg(ql_dbg_init, vha, 0x0125,
+		    "INTa mode: Enabled.\n");
+		ha->flags.mr_intr_valid = 1;
+	}
+
+clear_risc_ints:
+	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+		goto fail;
+
+	spin_lock_irq(&ha->hardware_lock);
+	WRT_REG_WORD(&reg->isp.semaphore, 0);
+	spin_unlock_irq(&ha->hardware_lock);
+
+fail:
+	return ret;
+}
+
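+/*
+ * Note: qla2x00_request_irqs() open-codes the MSI-X -> MSI -> INTx
+ * ladder because of per-chip quirks. Absent such quirks, the same
+ * fallback can be expressed in one call (hedged sketch, hypothetical
+ * driver):
+ *
+ *	int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ *	if (nvec < 0)
+ *		return nvec;
+ *	ret = request_irq(pci_irq_vector(pdev, 0), handler,
+ *	    pdev->msix_enabled || pdev->msi_enabled ? 0 : IRQF_SHARED,
+ *	    "name", data);
+ *
+ * PCI_IRQ_ALL_TYPES tries MSI-X first, then MSI, then the legacy line.
+ */
+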
+void
+qla2x00_free_irqs(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct rsp_que *rsp;
+	struct qla_msix_entry *qentry;
+	int i;
+
+	/*
+	 * We need to check that ha->rsp_q_map is valid in case we are called
+	 * from a probe failure context.
+	 */
+	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+		goto free_irqs;
+	rsp = ha->rsp_q_map[0];
+
+	if (ha->flags.msix_enabled) {
+		for (i = 0; i < ha->msix_count; i++) {
+			qentry = &ha->msix_entries[i];
+			if (qentry->have_irq) {
+				irq_set_affinity_notifier(qentry->vector, NULL);
+				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+			}
+		}
+		kfree(ha->msix_entries);
+		ha->msix_entries = NULL;
+		ha->flags.msix_enabled = 0;
+		ql_dbg(ql_dbg_init, vha, 0x0042,
+			"Disabled MSI-X.\n");
+	} else {
+		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+	}
+
+free_irqs:
+	pci_free_irq_vectors(ha->pdev);
+}
+
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+	struct qla_msix_entry *msix, int vector_type)
+{
+	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	int ret;
+
+	scnprintf(msix->name, sizeof(msix->name),
+	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
+	if (ret) {
+		ql_log(ql_log_fatal, vha, 0x00e6,
+		    "MSI-X: Unable to register handler -- %x/%d.\n",
+		    msix->vector, ret);
+		return ret;
+	}
+	msix->have_irq = 1;
+	msix->handle = qpair;
+	return ret;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mbx.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mbx.c
new file mode 100644
index 0000000..9d97371
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mbx.c
@@ -0,0 +1,6266 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_target.h"
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
+
+static struct mb_cmd_name {
+	uint16_t cmd;
+	const char *str;
+} mb_str[] = {
+	{MBC_GET_PORT_DATABASE,		"GPDB"},
+	{MBC_GET_ID_LIST,		"GIDList"},
+	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+	int i;
+	struct mb_cmd_name *e;
+
+	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+		e = mb_str + i;
+		if (cmd == e->cmd)
+			return e->str;
+	}
+	return "unknown";
+}
+
+static struct rom_cmd {
+	uint16_t cmd;
+} rom_cmds[] = {
+	{ MBC_LOAD_RAM },
+	{ MBC_EXECUTE_FIRMWARE },
+	{ MBC_READ_RAM_WORD },
+	{ MBC_MAILBOX_REGISTER_TEST },
+	{ MBC_VERIFY_CHECKSUM },
+	{ MBC_GET_FIRMWARE_VERSION },
+	{ MBC_LOAD_RISC_RAM },
+	{ MBC_DUMP_RISC_RAM },
+	{ MBC_LOAD_RISC_RAM_EXTENDED },
+	{ MBC_DUMP_RISC_RAM_EXTENDED },
+	{ MBC_WRITE_RAM_WORD_EXTENDED },
+	{ MBC_READ_RAM_EXTENDED },
+	{ MBC_GET_RESOURCE_COUNTS },
+	{ MBC_SET_FIRMWARE_OPTION },
+	{ MBC_MID_INITIALIZE_FIRMWARE },
+	{ MBC_GET_FIRMWARE_STATE },
+	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+	{ MBC_GET_RETRY_COUNT },
+	{ MBC_TRACE_CONTROL },
+	{ MBC_INITIALIZE_MULTIQ },
+	{ MBC_IOCB_COMMAND_A64 },
+	{ MBC_GET_ADAPTER_LOOP_ID },
+	{ MBC_READ_SFP },
+};
+
+static int is_rom_cmd(uint16_t cmd)
+{
+	int i;
+	struct  rom_cmd *wc;
+
+	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+		wc = rom_cmds + i;
+		if (wc->cmd == cmd)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * qla2x00_mailbox_command
+ *	Issues a mailbox command and waits for completion.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ *	0 : QLA_SUCCESS (cmd performed successfully)
+ *	1 : QLA_FUNCTION_FAILED   (error encountered)
+ *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+{
+	int		rval, i;
+	unsigned long    flags = 0;
+	device_reg_t *reg;
+	uint8_t		abort_active;
+	uint8_t		io_lock_on;
+	uint16_t	command = 0;
+	uint16_t	*iptr;
+	uint16_t __iomem *optr;
+	uint32_t	cnt;
+	uint32_t	mboxes;
+	unsigned long	wait_time;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
+
+	if (ha->pdev->error_state > pci_channel_io_frozen) {
+		ql_log(ql_log_warn, vha, 0x1001,
+		    "error_state is greater than pci_channel_io_frozen, "
+		    "exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_log(ql_log_warn, vha, 0x1002,
+		    "Device in failed state, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	/* if PCI error, then avoid mbx processing.*/
+	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
+	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
+		ql_log(ql_log_warn, vha, 0xd04e,
+		    "PCI error, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	reg = ha->iobase;
+	io_lock_on = base_vha->flags.init_done;
+
+	rval = QLA_SUCCESS;
+	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+	if (ha->flags.pci_channel_io_perm_failure) {
+		ql_log(ql_log_warn, vha, 0x1003,
+		    "Perm failure on EEH timeout MBX, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+		/* Setting Link-Down error */
+		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+		ql_log(ql_log_warn, vha, 0x1004,
+		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	/* check if ISP abort is active and return cmd with timeout */
+	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+	    !is_rom_cmd(mcp->mb[0])) {
+		ql_log(ql_log_info, vha, 0x1005,
+		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+		    mcp->mb[0]);
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	/*
+	 * Wait for active mailbox commands to finish by waiting at most tov
+	 * seconds. This is to serialize actual issuing of mailbox cmds during
+	 * non ISP abort time.
+	 */
+	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+		/* Timeout occurred. Return error. */
+		ql_log(ql_log_warn, vha, 0xd035,
+		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
+		    mcp->mb[0]);
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	ha->flags.mbox_busy = 1;
+	/* Save mailbox command for debug */
+	ha->mcp = mcp;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1006,
+	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Load mailbox registers. */
+	if (IS_P3P_TYPE(ha))
+		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
+		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
+	else
+		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
+
+	iptr = mcp->mb;
+	command = mcp->mb[0];
+	mboxes = mcp->out_mb;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1111,
+	    "Mailbox registers (OUT):\n");
+	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+		if (IS_QLA2200(ha) && cnt == 8)
+			optr =
+			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
+		if (mboxes & BIT_0) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1112,
+			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
+			WRT_REG_WORD(optr, *iptr);
+		}
+
+		mboxes >>= 1;
+		optr++;
+		iptr++;
+	}
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
+	    "I/O Address = %p.\n", optr);
+
+	/* Issue set host interrupt command to send cmd out. */
+	ha->flags.mbox_int = 0;
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+	/* Unlock mbx registers and wait for interrupt */
+	ql_dbg(ql_dbg_mbx, vha, 0x100f,
+	    "Going to unlock irq & waiting for interrupts. "
+	    "jiffies=%lx.\n", jiffies);
+
+	/* Wait for mbx cmd completion until timeout */
+
+	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+		if (IS_P3P_TYPE(ha)) {
+			if (RD_REG_DWORD(&reg->isp82.hint) &
+				HINT_MBX_INT_PENDING) {
+				spin_unlock_irqrestore(&ha->hardware_lock,
+					flags);
+				ha->flags.mbox_busy = 0;
+				ql_dbg(ql_dbg_mbx, vha, 0x1010,
+				    "Pending mailbox timeout, exiting.\n");
+				rval = QLA_FUNCTION_TIMEOUT;
+				goto premature_exit;
+			}
+			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+		} else if (IS_FWI2_CAPABLE(ha))
+			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+		else
+			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		wait_time = jiffies;
+		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
+		    mcp->tov * HZ)) {
+			ql_dbg(ql_dbg_mbx, vha, 0x117a,
+			    "cmd=%x Timeout.\n", command);
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		}
+		if (time_after(jiffies, wait_time + 5 * HZ))
+			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+			    command, jiffies_to_msecs(jiffies - wait_time));
+	} else {
+		ql_dbg(ql_dbg_mbx, vha, 0x1011,
+		    "Cmd=%x Polling Mode.\n", command);
+
+		if (IS_P3P_TYPE(ha)) {
+			if (RD_REG_DWORD(&reg->isp82.hint) &
+				HINT_MBX_INT_PENDING) {
+				spin_unlock_irqrestore(&ha->hardware_lock,
+					flags);
+				ha->flags.mbox_busy = 0;
+				ql_dbg(ql_dbg_mbx, vha, 0x1012,
+				    "Pending mailbox timeout, exiting.\n");
+				rval = QLA_FUNCTION_TIMEOUT;
+				goto premature_exit;
+			}
+			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+		} else if (IS_FWI2_CAPABLE(ha))
+			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+		else
+			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+		while (!ha->flags.mbox_int) {
+			if (time_after(jiffies, wait_time))
+				break;
+
+			/* Check for pending interrupts. */
+			qla2x00_poll(ha->rsp_q_map[0]);
+
+			if (!ha->flags.mbox_int &&
+			    !(IS_QLA2200(ha) &&
+			    command == MBC_LOAD_RISC_RAM_EXTENDED))
+				msleep(10);
+		} /* while */
+		ql_dbg(ql_dbg_mbx, vha, 0x1013,
+		    "Waited %d sec.\n",
+		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+	}
+
+	/* Check whether we timed out */
+	if (ha->flags.mbox_int) {
+		uint16_t *iptr2;
+
+		ql_dbg(ql_dbg_mbx, vha, 0x1014,
+		    "Cmd=%x completed.\n", command);
+
+		/* Got interrupt. Clear the flag. */
+		ha->flags.mbox_int = 0;
+		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+			ha->flags.mbox_busy = 0;
+			/* Setting Link-Down error */
+			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+			ha->mcp = NULL;
+			rval = QLA_FUNCTION_FAILED;
+			ql_log(ql_log_warn, vha, 0xd048,
+			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+			goto premature_exit;
+		}
+
+		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
+			rval = QLA_FUNCTION_FAILED;
+
+		/* Load return mailbox registers. */
+		iptr2 = mcp->mb;
+		iptr = (uint16_t *)&ha->mailbox_out[0];
+		mboxes = mcp->in_mb;
+
+		ql_dbg(ql_dbg_mbx, vha, 0x1113,
+		    "Mailbox registers (IN):\n");
+		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+			if (mboxes & BIT_0) {
+				*iptr2 = *iptr;
+				ql_dbg(ql_dbg_mbx, vha, 0x1114,
+				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
+			}
+
+			mboxes >>= 1;
+			iptr2++;
+			iptr++;
+		}
+	} else {
+		uint16_t mb[8];
+		uint32_t ictrl, host_status, hccr;
+		uint16_t        w;
+
+		if (IS_FWI2_CAPABLE(ha)) {
+			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
+			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+			host_status = RD_REG_DWORD(&reg->isp24.host_status);
+			hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+			ql_log(ql_log_warn, vha, 0xd04c,
+			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+			    mb[7], host_status, hccr);
+
+		} else {
+			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
+			ictrl = RD_REG_WORD(&reg->isp.ictrl);
+			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
+		}
+		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
+
+		/* Capture FW dump only, if PCI device active */
+		if (!pci_channel_offline(vha->hw->pdev)) {
+			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+			if (w == 0xffff || ictrl == 0xffffffff) {
+				/* Special case: if the driver is being
+				 * unloaded and the PCI device has gone
+				 * into a bad state due to a PCI error,
+				 * only the PCI ERR flag will be set.
+				 * Exit prematurely in that case.
+				 */
+				ha->flags.mbox_busy = 0;
+				rval = QLA_FUNCTION_TIMEOUT;
+				goto premature_exit;
+			}
+
+			/* Attempt to capture a firmware dump for further
+			 * analysis of the current firmware state. This is
+			 * not needed if we are intentionally generating
+			 * a dump.
+			 */
+			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
+				ha->isp_ops->fw_dump(vha, 0);
+			rval = QLA_FUNCTION_TIMEOUT;
+		}
+	}
+
+	ha->flags.mbox_busy = 0;
+
+	/* Clean up */
+	ha->mcp = NULL;
+
+	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x101a,
+		    "Checking for additional resp interrupt.\n");
+
+		/* polling mode for non isp_abort commands. */
+		qla2x00_poll(ha->rsp_q_map[0]);
+	}
+
+	if (rval == QLA_FUNCTION_TIMEOUT &&
+	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+		    ha->flags.eeh_busy) {
+			/* not in dpc. schedule it for dpc to take over. */
+			ql_dbg(ql_dbg_mbx, vha, 0x101b,
+			    "Timeout, schedule isp_abort_needed.\n");
+
+			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+				if (IS_QLA82XX(ha)) {
+					ql_dbg(ql_dbg_mbx, vha, 0x112a,
+					    "disabling pause transmit on port "
+					    "0 & 1.\n");
+					qla82xx_wr_32(ha,
+					    QLA82XX_CRB_NIU + 0x98,
+					    CRB_NIU_XG_PAUSE_CTL_P0|
+					    CRB_NIU_XG_PAUSE_CTL_P1);
+				}
+				ql_log(ql_log_info, base_vha, 0x101c,
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+				    "abort.\n", command, mcp->mb[0],
+				    ha->flags.eeh_busy);
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+		} else if (!abort_active) {
+			/* call abort directly since we are in the DPC thread */
+			ql_dbg(ql_dbg_mbx, vha, 0x101d,
+			    "Timeout, calling abort_isp.\n");
+
+			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+				if (IS_QLA82XX(ha)) {
+					ql_dbg(ql_dbg_mbx, vha, 0x112b,
+					    "disabling pause transmit on port "
+					    "0 & 1.\n");
+					qla82xx_wr_32(ha,
+					    QLA82XX_CRB_NIU + 0x98,
+					    CRB_NIU_XG_PAUSE_CTL_P0|
+					    CRB_NIU_XG_PAUSE_CTL_P1);
+				}
+				ql_log(ql_log_info, base_vha, 0x101e,
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x. Scheduling ISP abort ",
+				    command, mcp->mb[0]);
+				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				/* Allow next mbx cmd to come in. */
+				complete(&ha->mbx_cmd_comp);
+				if (ha->isp_ops->abort_isp(vha)) {
+					/* Failed. retry later. */
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+				}
+				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+				ql_dbg(ql_dbg_mbx, vha, 0x101f,
+				    "Finished abort_isp.\n");
+				goto mbx_done;
+			}
+		}
+	}
+
+premature_exit:
+	/* Allow next mbx cmd to come in. */
+	complete(&ha->mbx_cmd_comp);
+
+mbx_done:
+	if (rval) {
+		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
+			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
+			    dev_name(&ha->pdev->dev), 0x1020+0x800,
+			    vha->host_no);
+			mboxes = mcp->in_mb;
+			cnt = 4;
+			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
+				if (mboxes & BIT_0) {
+					printk(" mb[%u]=%x", i, mcp->mb[i]);
+					cnt--;
+				}
+			pr_warn(" cmd=%x ****\n", command);
+		}
+		ql_dbg(ql_dbg_mbx, vha, 0x1198,
+		    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
+		    RD_REG_DWORD(&reg->isp24.host_status),
+		    RD_REG_DWORD(&reg->isp24.ictrl),
+		    RD_REG_DWORD(&reg->isp24.istatus));
+	} else {
+		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
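+/*
+ * Typical caller pattern for qla2x00_mailbox_command() (hedged sketch;
+ * the command and return register chosen here are illustrative):
+ *
+ *	mbx_cmd_t mc = {};
+ *
+ *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;
+ *	mc.out_mb = MBX_0;		(load only mb[0])
+ *	mc.in_mb = MBX_1|MBX_0;		(read back mb[0] and mb[1])
+ *	mc.tov = MBX_TOV_SECONDS;
+ *	if (qla2x00_mailbox_command(vha, &mc) == QLA_SUCCESS)
+ *		state = mc.mb[1];
+ *
+ * out_mb/in_mb drive the register load/readback loops above; mb[0]
+ * always carries the command code on the way out and the completion
+ * status on the way back.
+ */
+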
+int
+qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
+    uint32_t risc_code_size)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+	    "Entered %s.\n", __func__);
+
+	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
+		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
+		mcp->mb[8] = MSW(risc_addr);
+		mcp->out_mb = MBX_8|MBX_0;
+	} else {
+		mcp->mb[0] = MBC_LOAD_RISC_RAM;
+		mcp->out_mb = MBX_0;
+	}
+	mcp->mb[1] = LSW(risc_addr);
+	mcp->mb[2] = MSW(req_dma);
+	mcp->mb[3] = LSW(req_dma);
+	mcp->mb[6] = MSW(MSD(req_dma));
+	mcp->mb[7] = LSW(MSD(req_dma));
+	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
+	if (IS_FWI2_CAPABLE(ha)) {
+		mcp->mb[4] = MSW(risc_code_size);
+		mcp->mb[5] = LSW(risc_code_size);
+		mcp->out_mb |= MBX_5|MBX_4;
+	} else {
+		mcp->mb[4] = LSW(risc_code_size);
+		mcp->out_mb |= MBX_4;
+	}
+
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1023,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+#define	EXTENDED_BB_CREDITS	BIT_0
+#define	NVME_ENABLE_FLAG	BIT_3
+static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
+{
+	uint16_t mb4 = BIT_0;
+
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
+
+	return mb4;
+}
+
+static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
+{
+	uint16_t mb4 = BIT_0;
+
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		struct nvram_81xx *nv = ha->nvram;
+
+		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
+	}
+
+	return mb4;
+}
+
+/*
+ * qla2x00_execute_fw
+ *     Start adapter firmware.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     TARGET_QUEUE_LOCK must be released.
+ *     ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *     qla2x00 local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+int
+qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(ha)) {
+		mcp->mb[1] = MSW(risc_addr);
+		mcp->mb[2] = LSW(risc_addr);
+		mcp->mb[3] = 0;
+		mcp->mb[4] = 0;
+		mcp->mb[11] = 0;
+		ha->flags.using_lr_setting = 0;
+		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+		    IS_QLA27XX(ha)) {
+			if (ql2xautodetectsfp) {
+				if (ha->flags.detected_lr_sfp) {
+					mcp->mb[4] |=
+					    qla25xx_set_sfp_lr_dist(ha);
+					ha->flags.using_lr_setting = 1;
+				}
+			} else {
+				struct nvram_81xx *nv = ha->nvram;
+				/* set LR distance if specified in nvram */
+				if (nv->enhanced_features &
+				    NEF_LR_DIST_ENABLE) {
+					mcp->mb[4] |=
+					    qla25xx_set_nvr_lr_dist(ha);
+					ha->flags.using_lr_setting = 1;
+				}
+			}
+		}
+
+		if (ql2xnvmeenable && IS_QLA27XX(ha))
+			mcp->mb[4] |= NVME_ENABLE_FLAG;
+
+		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+			struct nvram_81xx *nv = ha->nvram;
+			/* set minimum speed if specified in nvram */
+			if (nv->min_link_speed >= 2 &&
+			    nv->min_link_speed <= 5) {
+				mcp->mb[4] |= BIT_4;
+				mcp->mb[11] = nv->min_link_speed;
+				mcp->out_mb |= MBX_11;
+				mcp->in_mb |= MBX_5;
+				vha->min_link_speed_feat = nv->min_link_speed;
+			}
+		}
+
+		if (ha->flags.exlogins_enabled)
+			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
+
+		if (ha->flags.exchoffld_enabled)
+			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
+
+		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
+		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
+	} else {
+		mcp->mb[1] = LSW(risc_addr);
+		mcp->out_mb |= MBX_1;
+		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+			mcp->mb[2] = 0;
+			mcp->out_mb |= MBX_2;
+		}
+	}
+
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1026,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		if (IS_FWI2_CAPABLE(ha)) {
+			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
+			ql_dbg(ql_dbg_mbx, vha, 0x119a,
+			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+			ql_dbg(ql_dbg_mbx, vha, 0x1027,
+			    "exchanges=%x.\n", mcp->mb[1]);
+			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+				ha->max_speed_sup = mcp->mb[2] & BIT_0;
+				ql_dbg(ql_dbg_mbx, vha, 0x119b,
+				    "Maximum speed supported=%s.\n",
+				    ha->max_speed_sup ? "32Gbps" : "16Gbps");
+				if (vha->min_link_speed_feat) {
+					ha->min_link_speed = mcp->mb[5];
+					ql_dbg(ql_dbg_mbx, vha, 0x119c,
+					    "Minimum speed set=%s.\n",
+					    mcp->mb[5] == 5 ? "32Gbps" :
+					    mcp->mb[5] == 4 ? "16Gbps" :
+					    mcp->mb[5] == 3 ? "8Gbps" :
+					    mcp->mb[5] == 2 ? "4Gbps" :
+						"unknown");
+				}
+			}
+		}
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+		    "Done.\n");
+	}
+
+	return rval;
+}
+
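+/*
+ * Summary of the mb[4] option bits assembled by qla2x00_execute_fw()
+ * above (only the bits this file uses; the firmware may define more):
+ *
+ *	BIT_0	EXTENDED_BB_CREDITS / long-range distance base flag
+ *	BIT_3	NVME_ENABLE_FLAG (27xx with ql2xnvmeenable)
+ *	BIT_4	minimum link speed valid, speed code in mb[11]
+ *
+ * ENABLE_EXTENDED_LOGIN and ENABLE_EXCHANGE_OFFLD are OR-ed in as well
+ * when the corresponding features are enabled.
+ */
+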
+/*
+ * qla_get_exlogin_status
+ *	Get extended login status
+ *	uses the memory offload control/status Mailbox
+ *
+ * Input:
+ *	ha:		adapter state pointer.
+ *	fwopt:		firmware options
+ *
+ * Returns:
+ *	qla2x00 local function status
+ *
+ * Context:
+ *	Kernel context.
+ */
+#define	FETCH_XLOGINS_STAT	0x8
+int
+qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
+	uint16_t *ex_logins_cnt)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+	mcp->mb[1] = FETCH_XLOGINS_STAT;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_10|MBX_4|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
+	} else {
+		*buf_sz = mcp->mb[4];
+		*ex_logins_cnt = mcp->mb[10];
+
+		ql_log(ql_log_info, vha, 0x1190,
+		    "buffer size 0x%x, exchange login count=%d\n",
+		    mcp->mb[4], mcp->mb[10]);
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla_set_exlogin_mem_cfg
+ *	set extended login memory configuration
+ *	Mbx needs to be issued before init_cb is set
+ *
+ * Input:
+ *	ha:		adapter state pointer.
+ *	buffer:		buffer pointer
+ *	phys_addr:	physical address of buffer
+ *	size:		size of buffer
+ *	TARGET_QUEUE_LOCK must be released
+ *	ADAPTER_STATE_LOCK must be released
+ *
+ * Returns:
+ *	qla2x00 local function status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+#define CONFIG_XLOGINS_MEM	0x3
+int
+qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
+{
+	int		rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+	mcp->mb[1] = CONFIG_XLOGINS_MEM;
+	mcp->mb[2] = MSW(phys_addr);
+	mcp->mb[3] = LSW(phys_addr);
+	mcp->mb[6] = MSW(MSD(phys_addr));
+	mcp->mb[7] = LSW(MSD(phys_addr));
+	mcp->mb[8] = MSW(ha->exlogin_size);
+	mcp->mb[9] = LSW(ha->exlogin_size);
+	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_11|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla_get_exchoffld_status
+ *	Get exchange offload status
+ *	uses the memory offload control/status Mailbox
+ *
+ * Input:
+ *	ha:		adapter state pointer.
+ *	fwopt:		firmware options
+ *
+ * Returns:
+ *	qla2x00 local function status
+ *
+ * Context:
+ *	Kernel context.
+ */
+#define	FETCH_XCHOFFLD_STAT	0x2
+int
+qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
+	uint16_t *ex_logins_cnt)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_10|MBX_4|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
+	} else {
+		*buf_sz = mcp->mb[4];
+		*ex_logins_cnt = mcp->mb[10];
+
+		ql_log(ql_log_info, vha, 0x118e,
+		    "buffer size 0x%x, exchange offload count=%d\n",
+		    mcp->mb[4], mcp->mb[10]);
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla_set_exchoffld_mem_cfg
+ *	Set exchange offload memory configuration
+ *	Mbx needs to be issued before init_cb is set
+ *
+ * Input:
+ *	ha:		adapter state pointer.
+ *	buffer:		buffer pointer
+ *	phys_addr:	physical address of buffer
+ *	size:		size of buffer
+ *	TARGET_QUEUE_LOCK must be released
+ *	ADAPTER_STATE_LOCK must be released
+ *
+ * Returns:
+ *	qla2x00 local function status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+#define CONFIG_XCHOFFLD_MEM	0x3
+int
+qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
+{
+	int		rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
+	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
+	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
+	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
+	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
+	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
+	mcp->mb[8] = MSW(ha->exchoffld_size);
+	mcp->mb[9] = LSW(ha->exchoffld_size);
+	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_11|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_get_fw_version
+ *	Get firmware version.
+ *
+ * Input:
+ *	ha:		adapter state pointer.
+ *	major:		pointer for major number.
+ *	minor:		pointer for minor number.
+ *	subminor:	pointer for subminor number.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_fw_version(scsi_qla_host_t *vha)
+{
+	int		rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
+		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
+	if (IS_FWI2_CAPABLE(ha))
+		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+	if (IS_QLA27XX(ha))
+		mcp->in_mb |=
+		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
+		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
+
+	mcp->flags = 0;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS)
+		goto failed;
+
+	/* Return mailbox data. */
+	ha->fw_major_version = mcp->mb[1];
+	ha->fw_minor_version = mcp->mb[2];
+	ha->fw_subminor_version = mcp->mb[3];
+	ha->fw_attributes = mcp->mb[6];
+	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
+		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
+	else
+		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+
+	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
+		ha->mpi_version[0] = mcp->mb[10] & 0xff;
+		ha->mpi_version[1] = mcp->mb[11] >> 8;
+		ha->mpi_version[2] = mcp->mb[11] & 0xff;
+		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
+		ha->phy_version[0] = mcp->mb[8] & 0xff;
+		ha->phy_version[1] = mcp->mb[9] >> 8;
+		ha->phy_version[2] = mcp->mb[9] & 0xff;
+	}
+
+	if (IS_FWI2_CAPABLE(ha)) {
+		ha->fw_attributes_h = mcp->mb[15];
+		ha->fw_attributes_ext[0] = mcp->mb[16];
+		ha->fw_attributes_ext[1] = mcp->mb[17];
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
+		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
+		    __func__, mcp->mb[15], mcp->mb[6]);
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
+		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
+		    __func__, mcp->mb[17], mcp->mb[16]);
+
+		if (ha->fw_attributes_h & 0x4)
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
+			    "%s: Firmware supports Extended Login 0x%x\n",
+			    __func__, ha->fw_attributes_h);
+
+		if (ha->fw_attributes_h & 0x8)
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
+			    "%s: Firmware supports Exchange Offload 0x%x\n",
+			    __func__, ha->fw_attributes_h);
+
+		/*
+		 * FW supports nvme and driver load parameter requested nvme.
+		 * BIT 26 of fw_attributes indicates NVMe support.
+		 */
+		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
+			vha->flags.nvme_enabled = 1;
+
+	}
+
+	if (IS_QLA27XX(ha)) {
+		ha->mpi_version[0] = mcp->mb[10] & 0xff;
+		ha->mpi_version[1] = mcp->mb[11] >> 8;
+		ha->mpi_version[2] = mcp->mb[11] & 0xff;
+		ha->pep_version[0] = mcp->mb[13] & 0xff;
+		ha->pep_version[1] = mcp->mb[14] >> 8;
+		ha->pep_version[2] = mcp->mb[14] & 0xff;
+		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
+		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
+		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
+		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
+	}
+
+failed:
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/*
+ * qla2x00_get_fw_options
+ *	Get firmware options.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	fwopt = pointer for firmware options.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
+	} else {
+		fwopts[0] = mcp->mb[0];
+		fwopts[1] = mcp->mb[1];
+		fwopts[2] = mcp->mb[2];
+		fwopts[3] = mcp->mb[3];
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+
+/*
+ * qla2x00_set_fw_options
+ *	Set firmware options.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	fwopt = pointer for firmware options.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
+	mcp->mb[1] = fwopts[1];
+	mcp->mb[2] = fwopts[2];
+	mcp->mb[3] = fwopts[3];
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(vha->hw)) {
+		mcp->in_mb |= MBX_1;
+		mcp->mb[10] = fwopts[10];
+		mcp->out_mb |= MBX_10;
+	} else {
+		mcp->mb[10] = fwopts[10];
+		mcp->mb[11] = fwopts[11];
+		mcp->mb[12] = 0;	/* Undocumented, but used */
+		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+	}
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	fwopts[0] = mcp->mb[0];
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1030,
+		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_mbx_reg_test
+ *	Mailbox register wrap test.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+	mcp->mb[1] = 0xAAAA;
+	mcp->mb[2] = 0x5555;
+	mcp->mb[3] = 0xAA55;
+	mcp->mb[4] = 0x55AA;
+	mcp->mb[5] = 0xA5A5;
+	mcp->mb[6] = 0x5A5A;
+	mcp->mb[7] = 0x2525;
+	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval == QLA_SUCCESS) {
+		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
+		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
+		    mcp->mb[7] != 0x2525)
+			rval = QLA_FUNCTION_FAILED;
+	}
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_verify_checksum
+ *	Verify firmware checksum.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(vha->hw)) {
+		mcp->mb[1] = MSW(risc_addr);
+		mcp->mb[2] = LSW(risc_addr);
+		mcp->out_mb |= MBX_2|MBX_1;
+		mcp->in_mb |= MBX_2|MBX_1;
+	} else {
+		mcp->mb[1] = LSW(risc_addr);
+		mcp->out_mb |= MBX_1;
+		mcp->in_mb |= MBX_1;
+	}
+
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1036,
+		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
+		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_issue_iocb
+ *	Issue IOCB using mailbox command
+ *
+ * Input:
+ *	ha = adapter state pointer.
+ *	buffer = buffer pointer.
+ *	phys_addr = physical address of buffer.
+ *	size = size of buffer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
+    dma_addr_t phys_addr, size_t size, uint32_t tov)
+{
+	int		rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
+	mcp->mb[1] = 0;
+	mcp->mb[2] = MSW(phys_addr);
+	mcp->mb[3] = LSW(phys_addr);
+	mcp->mb[6] = MSW(MSD(phys_addr));
+	mcp->mb[7] = LSW(MSD(phys_addr));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_0;
+	mcp->tov = tov;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
+	} else {
+		sts_entry_t *sts_entry = (sts_entry_t *) buffer;
+
+		/* Mask reserved bits. */
+		sts_entry->entry_status &=
+		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
+    size_t size)
+{
+	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
+	    MBX_TOV_SECONDS);
+}
+
+/*
+ * qla2x00_abort_command
+ *	Abort command aborts a specified IOCB.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	sp = SB structure pointer.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_abort_command(srb_t *sp)
+{
+	unsigned long   flags = 0;
+	int		rval;
+	uint32_t	handle = 0;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+	fc_port_t	*fcport = sp->fcport;
+	scsi_qla_host_t *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+	    "Entered %s.\n", __func__);
+
+	if (vha->flags.qpairs_available && sp->qpair)
+		req = sp->qpair->req;
+	else
+		req = vha->req;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+		if (req->outstanding_cmds[handle] == sp)
+			break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (handle == req->num_outstanding_cmds) {
+		/* command not found */
+		return QLA_FUNCTION_FAILED;
+	}
+
+	mcp->mb[0] = MBC_ABORT_COMMAND;
+	if (HAS_EXTENDED_IDS(ha))
+		mcp->mb[1] = fcport->loop_id;
+	else
+		mcp->mb[1] = fcport->loop_id << 8;
+	mcp->mb[2] = (uint16_t)handle;
+	mcp->mb[3] = (uint16_t)(handle >> 16);
+	mcp->mb[6] = (uint16_t)cmd->device->lun;
+	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
+{
+	int rval, rval2;
+	mbx_cmd_t  mc;
+	mbx_cmd_t  *mcp = &mc;
+	scsi_qla_host_t *vha;
+	struct req_que *req;
+	struct rsp_que *rsp;
+
+	l = l;		/* LUN is unused for target aborts. */
+	vha = fcport->vha;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+	    "Entered %s.\n", __func__);
+
+	req = vha->hw->req_q_map[0];
+	rsp = req->rsp;
+	mcp->mb[0] = MBC_ABORT_TARGET;
+	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
+	if (HAS_EXTENDED_IDS(vha->hw)) {
+		mcp->mb[1] = fcport->loop_id;
+		mcp->mb[10] = 0;
+		mcp->out_mb |= MBX_10;
+	} else {
+		mcp->mb[1] = fcport->loop_id << 8;
+	}
+	mcp->mb[2] = vha->hw->loop_reset_delay;
+	mcp->mb[9] = vha->vp_idx;
+
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+		    "Failed=%x.\n", rval);
+	}
+
+	/* Issue marker IOCB. */
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
+							MK_SYNC_ID);
+	if (rval2 != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1040,
+		    "Failed to issue marker IOCB (%x).\n", rval2);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
+{
+	int rval, rval2;
+	mbx_cmd_t  mc;
+	mbx_cmd_t  *mcp = &mc;
+	scsi_qla_host_t *vha;
+	struct req_que *req;
+	struct rsp_que *rsp;
+
+	vha = fcport->vha;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+	    "Entered %s.\n", __func__);
+
+	req = vha->hw->req_q_map[0];
+	rsp = req->rsp;
+	mcp->mb[0] = MBC_LUN_RESET;
+	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (HAS_EXTENDED_IDS(vha->hw))
+		mcp->mb[1] = fcport->loop_id;
+	else
+		mcp->mb[1] = fcport->loop_id << 8;
+	mcp->mb[2] = (u32)l;
+	mcp->mb[3] = 0;
+	mcp->mb[9] = vha->vp_idx;
+
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
+	}
+
+	/* Issue marker IOCB. */
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+								MK_SYNC_ID_LUN);
+	if (rval2 != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1044,
+		    "Failed to issue marker IOCB (%x).\n", rval2);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_get_adapter_id
+ *	Get adapter ID and topology.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	id = pointer for loop ID.
+ *	al_pa = pointer for AL_PA.
+ *	area = pointer for area.
+ *	domain = pointer for domain.
+ *	top = pointer for topology.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
+    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
+	mcp->mb[9] = vha->vp_idx;
+	mcp->out_mb = MBX_9|MBX_0;
+	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (IS_CNA_CAPABLE(vha->hw))
+		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+	if (IS_FWI2_CAPABLE(vha->hw))
+		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
+	if (IS_QLA27XX(vha->hw))
+		mcp->in_mb |= MBX_15;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (mcp->mb[0] == MBS_COMMAND_ERROR)
+		rval = QLA_COMMAND_ERROR;
+	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
+		rval = QLA_INVALID_COMMAND;
+
+	/* Return data. */
+	*id = mcp->mb[1];
+	*al_pa = LSB(mcp->mb[2]);
+	*area = MSB(mcp->mb[2]);
+	*domain	= LSB(mcp->mb[3]);
+	*top = mcp->mb[6];
+	*sw_cap = mcp->mb[7];
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+		    "Done %s.\n", __func__);
+
+		if (IS_CNA_CAPABLE(vha->hw)) {
+			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
+			vha->fcoe_fcf_idx = mcp->mb[10];
+			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
+			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
+			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
+			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
+			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
+			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
+		}
+		/* If FA-WWN supported */
+		if (IS_FAWWN_CAPABLE(vha->hw)) {
+			if (mcp->mb[7] & BIT_14) {
+				vha->port_name[0] = MSB(mcp->mb[16]);
+				vha->port_name[1] = LSB(mcp->mb[16]);
+				vha->port_name[2] = MSB(mcp->mb[17]);
+				vha->port_name[3] = LSB(mcp->mb[17]);
+				vha->port_name[4] = MSB(mcp->mb[18]);
+				vha->port_name[5] = LSB(mcp->mb[18]);
+				vha->port_name[6] = MSB(mcp->mb[19]);
+				vha->port_name[7] = LSB(mcp->mb[19]);
+				fc_host_port_name(vha->host) =
+				    wwn_to_u64(vha->port_name);
+				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+				    "FA-WWN acquired %016llx\n",
+				    wwn_to_u64(vha->port_name));
+			}
+		}
+
+		if (IS_QLA27XX(vha->hw))
+			vha->bbcr = mcp->mb[15];
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_get_retry_cnt
+ *	Get current firmware login retry count and delay.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	retry_cnt = pointer to login retry count.
+ *	tov = pointer to login timeout value.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
+    uint16_t *r_a_tov)
+{
+	int rval;
+	uint16_t ratov;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_RETRY_COUNT;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x104a,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		/* Convert returned data and check our values. */
+		*r_a_tov = mcp->mb[3] / 2;
+		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
+		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
+			/* Update to the larger values */
+			*retry_cnt = (uint8_t)mcp->mb[1];
+			*tov = ratov;
+		}
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
+		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
+	}
+
+	return rval;
+}
+
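+/*
+ * Worked example for the conversion in qla2x00_get_retry_cnt(): mb[3]
+ * is reported in 100 ms units, so for mb[3] = 200:
+ *
+ *	r_a_tov = 200 / 2        = 100	(i.e. 10 seconds in 100 ms units)
+ *	ratov   = (200 / 2) / 10 = 10	seconds
+ *
+ * retry_cnt/tov are then only updated when the firmware product
+ * mb[1] * ratov exceeds the current retry_cnt * tov.
+ */
+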
+/*
+ * qla2x00_init_firmware
+ *	Initialize adapter firmware.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	dptr = Initialization control block pointer.
+ *	size = size of initialization control block.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+	    "Entered %s.\n", __func__);
+
+	if (IS_P3P_TYPE(ha) && ql2xdbwr)
+		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
+			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
+
+	if (ha->flags.npiv_supported)
+		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
+	else
+		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+	mcp->mb[1] = 0;
+	mcp->mb[2] = MSW(ha->init_cb_dma);
+	mcp->mb[3] = LSW(ha->init_cb_dma);
+	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
+	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
+		mcp->mb[1] = BIT_0;
+		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
+		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
+		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
+		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
+		mcp->mb[14] = sizeof(*ha->ex_init_cb);
+		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
+	}
+	/* Mailboxes 1 and 2 should normally be captured. */
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		/* mb3 is additional info about the installed SFP. */
+		mcp->in_mb  |= MBX_3;
+	mcp->buf_size = size;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x104d,
+		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
+	} else {
+		if (IS_QLA27XX(ha)) {
+			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+				ql_dbg(ql_dbg_mbx, vha, 0x119d,
+				    "Invalid SFP/Validation Failed\n");
+		}
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
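+
+/*
+ * Usage sketch (hypothetical caller, illustration only): ha->init_cb
+ * is DMA-mapped ahead of time and only its size is passed in here.
+ *
+ *	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+ *	if (rval != QLA_SUCCESS)
+ *		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ */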
+
+
+/*
+ * qla2x00_get_port_database
+ *	Issue normal/enhanced get port database mailbox command
+ *	and copy device name as necessary.
+ *
+ * Input:
+ *	ha = adapter state pointer.
+ *	dev = structure pointer.
+ *	opt = enhanced cmd option byte.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	port_database_t *pd;
+	struct port_database_24xx *pd24;
+	dma_addr_t pd_dma;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+	    "Entered %s.\n", __func__);
+
+	pd24 = NULL;
+	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+	if (pd == NULL) {
+		ql_log(ql_log_warn, vha, 0x1050,
+		    "Failed to allocate port database structure.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+	mcp->mb[0] = MBC_GET_PORT_DATABASE;
+	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
+		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
+	mcp->mb[2] = MSW(pd_dma);
+	mcp->mb[3] = LSW(pd_dma);
+	mcp->mb[6] = MSW(MSD(pd_dma));
+	mcp->mb[7] = LSW(MSD(pd_dma));
+	mcp->mb[9] = vha->vp_idx;
+	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+	mcp->in_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(ha)) {
+		mcp->mb[1] = fcport->loop_id;
+		mcp->mb[10] = opt;
+		mcp->out_mb |= MBX_10|MBX_1;
+		mcp->in_mb |= MBX_1;
+	} else if (HAS_EXTENDED_IDS(ha)) {
+		mcp->mb[1] = fcport->loop_id;
+		mcp->mb[10] = opt;
+		mcp->out_mb |= MBX_10|MBX_1;
+	} else {
+		mcp->mb[1] = fcport->loop_id << 8 | opt;
+		mcp->out_mb |= MBX_1;
+	}
+	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
+	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
+	mcp->flags = MBX_DMA_IN;
+	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS)
+		goto gpd_error_out;
+
+	if (IS_FWI2_CAPABLE(ha)) {
+		uint64_t zero = 0;
+		pd24 = (struct port_database_24xx *) pd;
+
+		/* Check for logged in state. */
+		if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
+		    pd24->last_login_state != PDS_PRLI_COMPLETE) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1051,
+			    "Unable to verify login-state (%x/%x) for "
+			    "loop_id %x.\n", pd24->current_login_state,
+			    pd24->last_login_state, fcport->loop_id);
+			rval = QLA_FUNCTION_FAILED;
+			goto gpd_error_out;
+		}
+
+		if (fcport->loop_id == FC_NO_LOOP_ID ||
+		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+		     memcmp(fcport->port_name, pd24->port_name, 8))) {
+			/* We lost the device mid way. */
+			rval = QLA_NOT_LOGGED_IN;
+			goto gpd_error_out;
+		}
+
+		/* Names are little-endian. */
+		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
+		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
+
+		/* Get port_id of device. */
+		fcport->d_id.b.domain = pd24->port_id[0];
+		fcport->d_id.b.area = pd24->port_id[1];
+		fcport->d_id.b.al_pa = pd24->port_id[2];
+		fcport->d_id.b.rsvd_1 = 0;
+
+		/* If not a target, the port must be an initiator or unknown. */
+		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
+			fcport->port_type = FCT_INITIATOR;
+		else
+			fcport->port_type = FCT_TARGET;
+
+		/* Passback COS information. */
+		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+				FC_COS_CLASS2 : FC_COS_CLASS3;
+
+		if (pd24->prli_svc_param_word_3[0] & BIT_7)
+			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+	} else {
+		uint64_t zero = 0;
+
+		/* Check for logged in state. */
+		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
+		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
+			ql_dbg(ql_dbg_mbx, vha, 0x100a,
+			    "Unable to verify login-state (%x/%x) - "
+			    "portid=%02x%02x%02x.\n", pd->master_state,
+			    pd->slave_state, fcport->d_id.b.domain,
+			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
+			rval = QLA_FUNCTION_FAILED;
+			goto gpd_error_out;
+		}
+
+		if (fcport->loop_id == FC_NO_LOOP_ID ||
+		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+		     memcmp(fcport->port_name, pd->port_name, 8))) {
+			/* We lost the device mid way. */
+			rval = QLA_NOT_LOGGED_IN;
+			goto gpd_error_out;
+		}
+
+		/* Names are little-endian. */
+		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+		/* Get port_id of device. */
+		fcport->d_id.b.domain = pd->port_id[0];
+		fcport->d_id.b.area = pd->port_id[3];
+		fcport->d_id.b.al_pa = pd->port_id[2];
+		fcport->d_id.b.rsvd_1 = 0;
+
+		/* If not a target, the port must be an initiator or unknown. */
+		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+			fcport->port_type = FCT_INITIATOR;
+		else
+			fcport->port_type = FCT_TARGET;
+
+		/* Passback COS information. */
+		fcport->supported_classes = (pd->options & BIT_4) ?
+		    FC_COS_CLASS2 : FC_COS_CLASS3;
+	}
+
+gpd_error_out:
+	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1052,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
+		    mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
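+
+/*
+ * Usage sketch (illustrative only, not part of the driver): refresh a
+ * discovered port's names, port_id and role after login, with option
+ * byte 0 for a plain query.
+ *
+ *	if (qla2x00_get_port_database(vha, fcport, 0) == QLA_SUCCESS &&
+ *	    fcport->port_type == FCT_TARGET)
+ *		... port is PRLI-complete and usable as a SCSI target ...
+ */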
+
+/*
+ * qla2x00_get_firmware_state
+ *	Get adapter firmware state.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	states = pointer to firmware state array.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+	mcp->out_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(vha->hw))
+		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	else
+		mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	/* Return firmware states. */
+	states[0] = mcp->mb[1];
+	if (IS_FWI2_CAPABLE(vha->hw)) {
+		states[1] = mcp->mb[2];
+		states[2] = mcp->mb[3];  /* SFP info */
+		states[3] = mcp->mb[4];
+		states[4] = mcp->mb[5];
+		states[5] = mcp->mb[6];  /* DPORT status */
+	}
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
+	} else {
+		if (IS_QLA27XX(ha)) {
+			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+				ql_dbg(ql_dbg_mbx, vha, 0x119e,
+				    "Invalid SFP/Validation Failed\n");
+		}
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
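+
+/*
+ * Usage sketch (illustrative only): states[0] carries the firmware
+ * state from mb[1]; on FWI2-capable parts states[2] mirrors the SFP
+ * info from mb[3] and states[5] the DPORT status from mb[6].
+ *
+ *	uint16_t states[6];
+ *
+ *	if (qla2x00_get_firmware_state(vha, states) == QLA_SUCCESS &&
+ *	    states[0] == FSTATE_READY)
+ *		... firmware is initialized and the link is usable ...
+ */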
+
+/*
+ * qla2x00_get_port_name
+ *	Issue get port name mailbox command.
+ *	Returned name is in big endian format.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	loop_id = loop ID of device.
+ *	name = pointer for name.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
+    uint8_t opt)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_PORT_NAME;
+	mcp->mb[9] = vha->vp_idx;
+	mcp->out_mb = MBX_9|MBX_1|MBX_0;
+	if (HAS_EXTENDED_IDS(vha->hw)) {
+		mcp->mb[1] = loop_id;
+		mcp->mb[10] = opt;
+		mcp->out_mb |= MBX_10;
+	} else {
+		mcp->mb[1] = loop_id << 8 | opt;
+	}
+
+	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
+	} else {
+		if (name != NULL) {
+			/* This function returns name in big endian. */
+			name[0] = MSB(mcp->mb[2]);
+			name[1] = LSB(mcp->mb[2]);
+			name[2] = MSB(mcp->mb[3]);
+			name[3] = LSB(mcp->mb[3]);
+			name[4] = MSB(mcp->mb[6]);
+			name[5] = LSB(mcp->mb[6]);
+			name[6] = MSB(mcp->mb[7]);
+			name[7] = LSB(mcp->mb[7]);
+		}
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
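+
+/*
+ * Worked example (illustrative only): the WWPN comes back big-endian
+ * across mb[2]/mb[3]/mb[6]/mb[7].  For mb[2] = 0x2100, mb[3] = 0x00e0,
+ * mb[6] = 0x8b12 and mb[7] = 0x3456 the assembled buffer reads
+ * 21:00:00:e0:8b:12:34:56, so wwn_to_u64(name) = 0x210000e08b123456.
+ */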
+
+/*
+ * qla24xx_link_initialize
+ *	Issue link initialization mailbox command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_LINK_INITIALIZATION;
+	mcp->mb[1] = BIT_4;
+	if (vha->hw->operating_mode == LOOP)
+		mcp->mb[1] |= BIT_6;
+	else
+		mcp->mb[1] |= BIT_5;
+	mcp->mb[2] = 0;
+	mcp->mb[3] = 0;
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_lip_reset
+ *	Issue LIP reset mailbox command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_lip_reset(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+	    "Entered %s.\n", __func__);
+
+	if (IS_CNA_CAPABLE(vha->hw)) {
+		/* Logout across all FCFs. */
+		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+		mcp->mb[1] = BIT_1;
+		mcp->mb[2] = 0;
+		mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	} else if (IS_FWI2_CAPABLE(vha->hw)) {
+		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+		mcp->mb[1] = BIT_6;
+		mcp->mb[2] = 0;
+		mcp->mb[3] = vha->hw->loop_reset_delay;
+		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	} else {
+		mcp->mb[0] = MBC_LIP_RESET;
+		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+		if (HAS_EXTENDED_IDS(vha->hw)) {
+			mcp->mb[1] = 0x00ff;
+			mcp->mb[10] = 0;
+			mcp->out_mb |= MBX_10;
+		} else {
+			mcp->mb[1] = 0xff00;
+		}
+		mcp->mb[2] = vha->hw->loop_reset_delay;
+		mcp->mb[3] = 0;
+	}
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_send_sns
+ *	Send SNS command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	sns = pointer for command.
+ *	cmd_size = command size.
+ *	buf_size = response/command size.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
+    uint16_t cmd_size, size_t buf_size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
+	mcp->mb[1] = cmd_size;
+	mcp->mb[2] = MSW(sns_phys_address);
+	mcp->mb[3] = LSW(sns_phys_address);
+	mcp->mb[6] = MSW(MSD(sns_phys_address));
+	mcp->mb[7] = LSW(MSD(sns_phys_address));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->buf_size = buf_size;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
+	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
+	    "Retry cnt=%d ratov=%d total tov=%d.\n",
+	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x105f,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
+{
+	int		rval;
+
+	struct logio_entry_24xx *lg;
+	dma_addr_t	lg_dma;
+	uint32_t	iop[2];
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+	    "Entered %s.\n", __func__);
+
+	if (vha->vp_idx && vha->qpair)
+		req = vha->qpair->req;
+	else
+		req = ha->req_q_map[0];
+
+	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+	if (lg == NULL) {
+		ql_log(ql_log_warn, vha, 0x1062,
+		    "Failed to allocate login IOCB.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(lg, 0, sizeof(struct logio_entry_24xx));
+
+	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	lg->entry_count = 1;
+	lg->handle = MAKE_HANDLE(req->id, lg->handle);
+	lg->nport_handle = cpu_to_le16(loop_id);
+	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+	if (opt & BIT_0)
+		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+	if (opt & BIT_1)
+		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+	lg->port_id[0] = al_pa;
+	lg->port_id[1] = area;
+	lg->port_id[2] = domain;
+	lg->vp_index = vha->vp_idx;
+	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+	    (ha->r_a_tov / 10 * 2) + 2);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1063,
+		    "Failed to issue login IOCB (%x).\n", rval);
+	} else if (lg->entry_status != 0) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1064,
+		    "Failed to complete IOCB -- error status (%x).\n",
+		    lg->entry_status);
+		rval = QLA_FUNCTION_FAILED;
+	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
+		iop[0] = le32_to_cpu(lg->io_parameter[0]);
+		iop[1] = le32_to_cpu(lg->io_parameter[1]);
+
+		ql_dbg(ql_dbg_mbx, vha, 0x1065,
+		    "Failed to complete IOCB -- completion  status (%x) "
+		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
+		    iop[0], iop[1]);
+
+		switch (iop[0]) {
+		case LSC_SCODE_PORTID_USED:
+			mb[0] = MBS_PORT_ID_USED;
+			mb[1] = LSW(iop[1]);
+			break;
+		case LSC_SCODE_NPORT_USED:
+			mb[0] = MBS_LOOP_ID_USED;
+			break;
+		case LSC_SCODE_NOLINK:
+		case LSC_SCODE_NOIOCB:
+		case LSC_SCODE_NOXCB:
+		case LSC_SCODE_CMD_FAILED:
+		case LSC_SCODE_NOFABRIC:
+		case LSC_SCODE_FW_NOT_READY:
+		case LSC_SCODE_NOT_LOGGED_IN:
+		case LSC_SCODE_NOPCB:
+		case LSC_SCODE_ELS_REJECT:
+		case LSC_SCODE_CMD_PARAM_ERR:
+		case LSC_SCODE_NONPORT:
+		case LSC_SCODE_LOGGED_IN:
+		case LSC_SCODE_NOFLOGI_ACC:
+		default:
+			mb[0] = MBS_COMMAND_ERROR;
+			break;
+		}
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+		    "Done %s.\n", __func__);
+
+		iop[0] = le32_to_cpu(lg->io_parameter[0]);
+
+		mb[0] = MBS_COMMAND_COMPLETE;
+		mb[1] = 0;
+		if (iop[0] & BIT_4) {
+			if (iop[0] & BIT_8)
+				mb[1] |= BIT_1;
+		} else
+			mb[1] = BIT_0;
+
+		/* Passback COS information. */
+		mb[10] = 0;
+		if (lg->io_parameter[7] || lg->io_parameter[8])
+			mb[10] |= BIT_0;	/* Class 2. */
+		if (lg->io_parameter[9] || lg->io_parameter[10])
+			mb[10] |= BIT_1;	/* Class 3. */
+		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
+			mb[10] |= BIT_7;	/* Confirmed Completion
+						 * Allowed
+						 */
+	}
+
+	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
+
+	return rval;
+}
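+
+/*
+ * Sketch of how a hypothetical caller consumes the emulated mailbox
+ * status (illustrative only): mb[0] mimics the legacy login-fabric
+ * completion codes, mb[1] carries the conflicting handle for
+ * MBS_PORT_ID_USED, and mb[10] packs the class-of-service bits.
+ *
+ *	uint16_t mb[32] = { 0 };
+ *
+ *	rval = qla24xx_login_fabric(vha, loop_id, domain, area, al_pa,
+ *	    mb, BIT_0);
+ *	if (rval == QLA_SUCCESS && mb[0] == MBS_PORT_ID_USED)
+ *		... port ID already in use; mb[1] names the holder ...
+ */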
+
+/*
+ * qla2x00_login_fabric
+ *	Issue login fabric port mailbox command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	loop_id = device loop ID.
+ *	domain = device domain.
+ *	area = device area.
+ *	al_pa = device AL_PA.
+ *	status = pointer for return status.
+ *	opt = command options.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	if (HAS_EXTENDED_IDS(ha)) {
+		mcp->mb[1] = loop_id;
+		mcp->mb[10] = opt;
+		mcp->out_mb |= MBX_10;
+	} else {
+		mcp->mb[1] = (loop_id << 8) | opt;
+	}
+	mcp->mb[2] = domain;
+	mcp->mb[3] = area << 8 | al_pa;
+
+	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
+	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	/* Return mailbox statuses. */
+	if (mb != NULL) {
+		mb[0] = mcp->mb[0];
+		mb[1] = mcp->mb[1];
+		mb[2] = mcp->mb[2];
+		mb[6] = mcp->mb[6];
+		mb[7] = mcp->mb[7];
+		/* COS retrieved from Get-Port-Database mailbox command. */
+		mb[10] = 0;
+	}
+
+	if (rval != QLA_SUCCESS) {
+		/* RLU tmp code: the main mailbox_command function needs to be
+		 * changed to return ok even when the mailbox completion value
+		 * is not SUCCESS. The caller is then responsible for
+		 * interpreting the return values of this mailbox command if
+		 * we're not to change too much of the existing code.
+		 */
+		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
+		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
+		    mcp->mb[0] == 0x4006)
+			rval = QLA_SUCCESS;
+
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1068,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_login_local_device
+ *           Issue login loop port mailbox command.
+ *
+ * Input:
+ *           ha = adapter block pointer.
+ *           loop_id = device loop ID.
+ *           opt = command options.
+ *
+ * Returns:
+ *            Return status code.
+ *
+ * Context:
+ *            Kernel context.
+ *
+ */
+int
+qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
+    uint16_t *mb_ret, uint8_t opt)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+	    "Entered %s.\n", __func__);
+
+	if (IS_FWI2_CAPABLE(ha))
+		return qla24xx_login_fabric(vha, fcport->loop_id,
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa, mb_ret, opt);
+
+	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
+	if (HAS_EXTENDED_IDS(ha))
+		mcp->mb[1] = fcport->loop_id;
+	else
+		mcp->mb[1] = fcport->loop_id << 8;
+	mcp->mb[2] = opt;
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
+	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	/* Return mailbox statuses. */
+	if (mb_ret != NULL) {
+		mb_ret[0] = mcp->mb[0];
+		mb_ret[1] = mcp->mb[1];
+		mb_ret[6] = mcp->mb[6];
+		mb_ret[7] = mcp->mb[7];
+	}
+
+	if (rval != QLA_SUCCESS) {
+		/* AV tmp code: the main mailbox_command function needs to be
+		 * changed to return ok even when the mailbox completion value
+		 * is not SUCCESS. The caller is then responsible for
+		 * interpreting the return values of this mailbox command if
+		 * we're not to change too much of the existing code.
+		 */
+		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
+			rval = QLA_SUCCESS;
+
+		ql_dbg(ql_dbg_mbx, vha, 0x106b,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+    uint8_t area, uint8_t al_pa)
+{
+	int		rval;
+	struct logio_entry_24xx *lg;
+	dma_addr_t	lg_dma;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+	    "Entered %s.\n", __func__);
+
+	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+	if (lg == NULL) {
+		ql_log(ql_log_warn, vha, 0x106e,
+		    "Failed to allocate logout IOCB.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(lg, 0, sizeof(struct logio_entry_24xx));
+
+	req = vha->req;
+	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	lg->entry_count = 1;
+	lg->handle = MAKE_HANDLE(req->id, lg->handle);
+	lg->nport_handle = cpu_to_le16(loop_id);
+	lg->control_flags =
+	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
+		LCF_FREE_NPORT);
+	lg->port_id[0] = al_pa;
+	lg->port_id[1] = area;
+	lg->port_id[2] = domain;
+	lg->vp_index = vha->vp_idx;
+	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
+	    (ha->r_a_tov / 10 * 2) + 2);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x106f,
+		    "Failed to issue logout IOCB (%x).\n", rval);
+	} else if (lg->entry_status != 0) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1070,
+		    "Failed to complete IOCB -- error status (%x).\n",
+		    lg->entry_status);
+		rval = QLA_FUNCTION_FAILED;
+	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1071,
+		    "Failed to complete IOCB -- completion status (%x) "
+		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
+		    le32_to_cpu(lg->io_parameter[0]),
+		    le32_to_cpu(lg->io_parameter[1]));
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
+
+	return rval;
+}
+
+/*
+ * qla2x00_fabric_logout
+ *	Issue logout fabric port mailbox command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	loop_id = device loop ID.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
+    uint8_t area, uint8_t al_pa)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
+	mcp->out_mb = MBX_1|MBX_0;
+	if (HAS_EXTENDED_IDS(vha->hw)) {
+		mcp->mb[1] = loop_id;
+		mcp->mb[10] = 0;
+		mcp->out_mb |= MBX_10;
+	} else {
+		mcp->mb[1] = loop_id << 8;
+	}
+
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1074,
+		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_full_login_lip
+ *	Issue full login LIP mailbox command.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_full_login_lip(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
+	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
+	mcp->mb[2] = 0;
+	mcp->mb[3] = 0;
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_get_id_list
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
+    uint16_t *entries)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+	    "Entered %s.\n", __func__);
+
+	if (id_list == NULL)
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_GET_ID_LIST;
+	mcp->out_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(vha->hw)) {
+		mcp->mb[2] = MSW(id_list_dma);
+		mcp->mb[3] = LSW(id_list_dma);
+		mcp->mb[6] = MSW(MSD(id_list_dma));
+		mcp->mb[7] = LSW(MSD(id_list_dma));
+		mcp->mb[8] = 0;
+		mcp->mb[9] = vha->vp_idx;
+		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
+	} else {
+		mcp->mb[1] = MSW(id_list_dma);
+		mcp->mb[2] = LSW(id_list_dma);
+		mcp->mb[3] = MSW(MSD(id_list_dma));
+		mcp->mb[6] = LSW(MSD(id_list_dma));
+		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
+	}
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
+	} else {
+		*entries = mcp->mb[1];
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
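+
+/*
+ * Usage sketch (illustrative only): the caller provides a DMA-mapped
+ * buffer sized for the firmware ID list; on success *entries reports
+ * how many records were returned.
+ *
+ *	uint16_t entries = 0;
+ *
+ *	if (qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
+ *	    &entries) == QLA_SUCCESS)
+ *		... walk the first entries records of ha->gid_list ...
+ */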
+
+/*
+ * qla2x00_get_resource_cnts
+ *	Get current firmware resource counts.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
+		mcp->in_mb |= MBX_12;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x107d,
+		    "Failed mb[0]=%x.\n", mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
+		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
+		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
+		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
+		    mcp->mb[11], mcp->mb[12]);
+
+		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
+		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
+		ha->cur_fw_xcb_count = mcp->mb[3];
+		ha->orig_fw_xcb_count = mcp->mb[6];
+		ha->cur_fw_iocb_count = mcp->mb[7];
+		ha->orig_fw_iocb_count = mcp->mb[10];
+		if (ha->flags.npiv_supported)
+			ha->max_npiv_vports = mcp->mb[11];
+		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+			ha->fw_max_fcf_count = mcp->mb[12];
+	}
+
+	return rval;
+}
+
+/*
+ * qla2x00_get_fcal_position_map
+ *	Get FCAL (LILP) position map using mailbox command
+ *
+ * Input:
+ *	ha = adapter state pointer.
+ *	pos_map = buffer pointer (can be NULL).
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	char *pmap;
+	dma_addr_t pmap_dma;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+	    "Entered %s.\n", __func__);
+
+	pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
+	if (pmap == NULL) {
+		ql_log(ql_log_warn, vha, 0x1080,
+		    "Memory alloc failed.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(pmap, 0, FCAL_MAP_SIZE);
+
+	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
+	mcp->mb[2] = MSW(pmap_dma);
+	mcp->mb[3] = LSW(pmap_dma);
+	mcp->mb[6] = MSW(MSD(pmap_dma));
+	mcp->mb[7] = LSW(MSD(pmap_dma));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->buf_size = FCAL_MAP_SIZE;
+	mcp->flags = MBX_DMA_IN;
+	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval == QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
+		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
+		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
+		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
+		    pmap, pmap[0] + 1);
+
+		if (pos_map)
+			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+	}
+	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
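+
+/*
+ * Note on the buffer layout (illustrative only): byte 0 of the LILP
+ * map is the count of valid AL_PAs that follow, which is why the dump
+ * above prints pmap[0] + 1 bytes.  A caller passing a FCAL_MAP_SIZE
+ * buffer via pos_map can walk it the same way:
+ *
+ *	for (i = 1; i <= pos_map[0]; i++)
+ *		... pos_map[i] is a participating AL_PA ...
+ */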
+
+/*
+ * qla2x00_get_link_status
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	loop_id = device loop ID.
+ *	ret_buf = pointer to link status return buffer.
+ *
+ * Returns:
+ *	0 = success.
+ *	BIT_0 = mem alloc error.
+ *	BIT_1 = mailbox error.
+ */
+int
+qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
+    struct link_statistics *stats, dma_addr_t stats_dma)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	uint32_t *iter = (void *)stats;
+	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_LINK_STATUS;
+	mcp->mb[2] = MSW(LSD(stats_dma));
+	mcp->mb[3] = LSW(LSD(stats_dma));
+	mcp->mb[6] = MSW(MSD(stats_dma));
+	mcp->mb[7] = LSW(MSD(stats_dma));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+	mcp->in_mb = MBX_0;
+	if (IS_FWI2_CAPABLE(ha)) {
+		mcp->mb[1] = loop_id;
+		mcp->mb[4] = 0;
+		mcp->mb[10] = 0;
+		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
+		mcp->in_mb |= MBX_1;
+	} else if (HAS_EXTENDED_IDS(ha)) {
+		mcp->mb[1] = loop_id;
+		mcp->mb[10] = 0;
+		mcp->out_mb |= MBX_10|MBX_1;
+	} else {
+		mcp->mb[1] = loop_id << 8;
+		mcp->out_mb |= MBX_1;
+	}
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = IOCTL_CMD;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval == QLA_SUCCESS) {
+		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1085,
+			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			/* Re-endianize - firmware data is le32. */
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+			    "Done %s.\n", __func__);
+			for ( ; dwords--; iter++)
+				le32_to_cpus(iter);
+		}
+	} else {
+		/* Failed. */
+		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
+	}
+
+	return rval;
+}
+
+int
+qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
+    dma_addr_t stats_dma, uint16_t options)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	uint32_t *iter, dwords;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+	    "Entered %s.\n", __func__);
+
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+	mc.mb[2] = MSW(stats_dma);
+	mc.mb[3] = LSW(stats_dma);
+	mc.mb[6] = MSW(MSD(stats_dma));
+	mc.mb[7] = LSW(MSD(stats_dma));
+	mc.mb[8] = sizeof(struct link_statistics) / 4;
+	mc.mb[9] = cpu_to_le16(vha->vp_idx);
+	mc.mb[10] = cpu_to_le16(options);
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+
+	if (rval == QLA_SUCCESS) {
+		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1089,
+			    "Failed mb[0]=%x.\n", mcp->mb[0]);
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+			    "Done %s.\n", __func__);
+			/* Re-endianize - firmware data is le32. */
+			dwords = sizeof(struct link_statistics) / 4;
+			iter = &stats->link_fail_cnt;
+			for ( ; dwords--; iter++)
+				le32_to_cpus(iter);
+		}
+	} else {
+		/* Failed. */
+		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
+	}
+
+	return rval;
+}
+
+int
+qla24xx_abort_command(srb_t *sp)
+{
+	int		rval;
+	unsigned long   flags = 0;
+
+	struct abort_entry_24xx *abt;
+	dma_addr_t	abt_dma;
+	uint32_t	handle;
+	fc_port_t	*fcport = sp->fcport;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = vha->req;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+	    "Entered %s.\n", __func__);
+
+	if (sp->qpair)
+		req = sp->qpair->req;
+
+	if (ql2xasynctmfenable)
+		return qla24xx_async_abort_command(sp);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
+		if (req->outstanding_cmds[handle] == sp)
+			break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (handle == req->num_outstanding_cmds) {
+		/* Command not found. */
+		return QLA_FUNCTION_FAILED;
+	}
+
+	abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
+	if (abt == NULL) {
+		ql_log(ql_log_warn, vha, 0x108d,
+		    "Failed to allocate abort IOCB.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(abt, 0, sizeof(struct abort_entry_24xx));
+
+	abt->entry_type = ABORT_IOCB_TYPE;
+	abt->entry_count = 1;
+	abt->handle = MAKE_HANDLE(req->id, abt->handle);
+	abt->nport_handle = cpu_to_le16(fcport->loop_id);
+	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
+	abt->port_id[0] = fcport->d_id.b.al_pa;
+	abt->port_id[1] = fcport->d_id.b.area;
+	abt->port_id[2] = fcport->d_id.b.domain;
+	abt->vp_index = fcport->vha->vp_idx;
+
+	abt->req_que_no = cpu_to_le16(req->id);
+
+	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x108e,
+		    "Failed to issue IOCB (%x).\n", rval);
+	} else if (abt->entry_status != 0) {
+		ql_dbg(ql_dbg_mbx, vha, 0x108f,
+		    "Failed to complete IOCB -- error status (%x).\n",
+		    abt->entry_status);
+		rval = QLA_FUNCTION_FAILED;
+	} else if (abt->nport_handle != cpu_to_le16(0)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1090,
+		    "Failed to complete IOCB -- completion status (%x).\n",
+		    le16_to_cpu(abt->nport_handle));
+		if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
+			rval = QLA_FUNCTION_PARAMETER_ERROR;
+		else
+			rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
+
+	return rval;
+}
+
+struct tsk_mgmt_cmd {
+	union {
+		struct tsk_mgmt_entry tsk;
+		struct sts_entry_24xx sts;
+	} p;
+};
+
+static int
+__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
+    uint64_t l, int tag)
+{
+	int		rval, rval2;
+	struct tsk_mgmt_cmd *tsk;
+	struct sts_entry_24xx *sts;
+	dma_addr_t	tsk_dma;
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha;
+	struct req_que *req;
+	struct rsp_que *rsp;
+	struct qla_qpair *qpair;
+
+	vha = fcport->vha;
+	ha = vha->hw;
+	req = vha->req;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+	    "Entered %s.\n", __func__);
+
+	if (vha->vp_idx && vha->qpair) {
+		/* NPIV port */
+		qpair = vha->qpair;
+		rsp = qpair->rsp;
+		req = qpair->req;
+	} else {
+		rsp = req->rsp;
+	}
+
+	tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+	if (tsk == NULL) {
+		ql_log(ql_log_warn, vha, 0x1093,
+		    "Failed to allocate task management IOCB.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
+
+	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
+	tsk->p.tsk.entry_count = 1;
+	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
+	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
+	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
+	tsk->p.tsk.control_flags = cpu_to_le32(type);
+	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
+	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
+	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
+	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
+	if (type == TCF_LUN_RESET) {
+		int_to_scsilun(l, &tsk->p.tsk.lun);
+		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
+		    sizeof(tsk->p.tsk.lun));
+	}
+
+	sts = &tsk->p.sts;
+	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1094,
+		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
+	} else if (sts->entry_status != 0) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1095,
+		    "Failed to complete IOCB -- error status (%x).\n",
+		    sts->entry_status);
+		rval = QLA_FUNCTION_FAILED;
+	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1096,
+		    "Failed to complete IOCB -- completion status (%x).\n",
+		    le16_to_cpu(sts->comp_status));
+		rval = QLA_FUNCTION_FAILED;
+	} else if (le16_to_cpu(sts->scsi_status) &
+	    SS_RESPONSE_INFO_LEN_VALID) {
+		if (le32_to_cpu(sts->rsp_data_len) < 4) {
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
+			    "Ignoring inconsistent data length -- not enough "
+			    "response info (%d).\n",
+			    le32_to_cpu(sts->rsp_data_len));
+		} else if (sts->data[3]) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1098,
+			    "Failed to complete IOCB -- response (%x).\n",
+			    sts->data[3]);
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+
+	/* Issue marker IOCB. */
+	rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
+	if (rval2 != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1099,
+		    "Failed to issue marker IOCB (%x).\n", rval2);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
+
+	return rval;
+}
+
+int
+qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
+{
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+
+	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
+}
+
+int
+qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
+{
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
+		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+
+	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
+}
+
+int
+qla2x00_system_error(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 5;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_WRITE_SERDES;
+	mcp->mb[1] = addr;
+	if (IS_QLA2031(vha->hw))
+		mcp->mb[2] = data & 0xff;
+	else
+		mcp->mb[2] = data;
+
+	mcp->mb[3] = 0;
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1183,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_READ_SERDES;
+	mcp->mb[1] = addr;
+	mcp->mb[3] = 0;
+	mcp->out_mb = MBX_3|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (IS_QLA2031(vha->hw))
+		*data = mcp->mb[1] & 0xff;
+	else
+		*data = mcp->mb[1];
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1186,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA8044(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+	mcp->mb[1] = HCS_WRITE_SERDES;
+	mcp->mb[3] = LSW(addr);
+	mcp->mb[4] = MSW(addr);
+	mcp->mb[5] = LSW(data);
+	mcp->mb[6] = MSW(data);
+	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA8044(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+	mcp->mb[1] = HCS_READ_SERDES;
+	mcp->mb[3] = LSW(addr);
+	mcp->mb[4] = MSW(addr);
+	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	*data = mcp->mb[2] << 16 | mcp->mb[1];
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x118a,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
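+
+/*
+ * Worked example (illustrative only): the 32-bit serdes word is
+ * reassembled from two 16-bit mailboxes, so mb[2] = 0x1234 and
+ * mb[1] = 0x5678 yield *data = 0x12345678.  A read-modify-write
+ * sequence pairs this with qla8044_write_serdes_word():
+ *
+ *	uint32_t val;
+ *
+ *	if (qla8044_read_serdes_word(vha, addr, &val) == QLA_SUCCESS)
+ *		qla8044_write_serdes_word(vha, addr, val | BIT_0);
+ */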
+
+/**
+ * qla2x00_set_serdes_params() - Set firmware serdes parameters.
+ * @vha: HA context
+ * @sw_em_1g: 1 Gb/s serdes parameter word
+ * @sw_em_2g: 2 Gb/s serdes parameter word
+ * @sw_em_4g: 4 Gb/s serdes parameter word
+ *
+ * Returns qla2x00 local function return status code.
+ */
+int
+qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
+    uint16_t sw_em_2g, uint16_t sw_em_4g)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SERDES_PARAMS;
+	mcp->mb[1] = BIT_0;
+	mcp->mb[2] = sw_em_1g | BIT_15;
+	mcp->mb[3] = sw_em_2g | BIT_15;
+	mcp->mb[4] = sw_em_4g | BIT_15;
+	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx, vha, 0x109f,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_stop_firmware(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_STOP_FIRMWARE;
+	mcp->mb[1] = 0;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 5;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
+		if (mcp->mb[0] == MBS_INVALID_COMMAND)
+			rval = QLA_INVALID_COMMAND;
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
+    uint16_t buffers)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_TRACE_CONTROL;
+	mcp->mb[1] = TC_EFT_ENABLE;
+	mcp->mb[2] = LSW(eft_dma);
+	mcp->mb[3] = MSW(eft_dma);
+	mcp->mb[4] = LSW(MSD(eft_dma));
+	mcp->mb[5] = MSW(MSD(eft_dma));
+	mcp->mb[6] = buffers;
+	mcp->mb[7] = TC_AEN_DISABLE;
+	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_TRACE_CONTROL;
+	mcp->mb[1] = TC_EFT_DISABLE;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
+    uint16_t buffers, uint16_t *mb, uint32_t *dwords)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
+	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_TRACE_CONTROL;
+	mcp->mb[1] = TC_FCE_ENABLE;
+	mcp->mb[2] = LSW(fce_dma);
+	mcp->mb[3] = MSW(fce_dma);
+	mcp->mb[4] = LSW(MSD(fce_dma));
+	mcp->mb[5] = MSW(MSD(fce_dma));
+	mcp->mb[6] = buffers;
+	mcp->mb[7] = TC_AEN_DISABLE;
+	mcp->mb[8] = 0;
+	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
+	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
+	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
+	    MBX_1|MBX_0;
+	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+		    "Done %s.\n", __func__);
+
+		if (mb)
+			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
+		if (dwords)
+			*dwords = buffers;
+	}
+
+	return rval;
+}
+
+int
+qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	if (unlikely(pci_channel_offline(vha->hw->pdev)))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_TRACE_CONTROL;
+	mcp->mb[1] = TC_FCE_DISABLE;
+	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
+	    MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+		    "Done %s.\n", __func__);
+
+		if (wr)
+			*wr = (uint64_t) mcp->mb[5] << 48 |
+			    (uint64_t) mcp->mb[4] << 32 |
+			    (uint64_t) mcp->mb[3] << 16 |
+			    (uint64_t) mcp->mb[2];
+		if (rd)
+			*rd = (uint64_t) mcp->mb[9] << 48 |
+			    (uint64_t) mcp->mb[8] << 32 |
+			    (uint64_t) mcp->mb[7] << 16 |
+			    (uint64_t) mcp->mb[6];
+	}
+
+	return rval;
+}
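+
+/*
+ * Worked example (illustrative only): the FCE write and read pointers
+ * each come back as four 16-bit mailboxes, assembled low word first.
+ * With mb[2] = 0x1111, mb[3] = 0x2222, mb[4] = 0x3333, mb[5] = 0x4444:
+ *
+ *	*wr = 0x4444333322221111
+ *
+ * i.e. mb[5] lands in bits 63:48 and mb[2] in bits 15:0; *rd is built
+ * the same way from mb[6..9].
+ */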
+
+int
+qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+	uint16_t *port_speed, uint16_t *mb)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_IIDMA_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_PORT_PARAMS;
+	mcp->mb[1] = loop_id;
+	mcp->mb[2] = mcp->mb[3] = 0;
+	mcp->mb[9] = vha->vp_idx;
+	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_3|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	/* Return mailbox statuses. */
+	if (mb != NULL) {
+		mb[0] = mcp->mb[0];
+		mb[1] = mcp->mb[1];
+		mb[3] = mcp->mb[3];
+	}
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+		    "Done %s.\n", __func__);
+		if (port_speed)
+			*port_speed = mcp->mb[3];
+	}
+
+	return rval;
+}
+
+int
+qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+    uint16_t port_speed, uint16_t *mb)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_IIDMA_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_PORT_PARAMS;
+	mcp->mb[1] = loop_id;
+	mcp->mb[2] = BIT_0;
+	mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+	mcp->mb[9] = vha->vp_idx;
+	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_3|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	/* Return mailbox statuses. */
+	if (mb != NULL) {
+		mb[0] = mcp->mb[0];
+		mb[1] = mcp->mb[1];
+		mb[3] = mcp->mb[3];
+	}
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+		    "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+void
+qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
+	struct vp_rpt_id_entry_24xx *rptid_entry)
+{
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *vp = NULL;
+	unsigned long   flags;
+	int found;
+	port_id_t id;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+	    "Entered %s.\n", __func__);
+
+	if (rptid_entry->entry_status != 0)
+		return;
+
+	id.b.domain = rptid_entry->port_id[2];
+	id.b.area   = rptid_entry->port_id[1];
+	id.b.al_pa  = rptid_entry->port_id[0];
+	id.b.rsvd_1 = 0;
+
+	if (rptid_entry->format == 0) {
+		/* loop */
+		ql_dbg(ql_dbg_async, vha, 0x10b7,
+		    "Format 0 : Number of VPs setup %d, number of "
+		    "VPs acquired %d.\n", rptid_entry->vp_setup,
+		    rptid_entry->vp_acquired);
+		ql_dbg(ql_dbg_async, vha, 0x10b8,
+		    "Primary port id %02x%02x%02x.\n",
+		    rptid_entry->port_id[2], rptid_entry->port_id[1],
+		    rptid_entry->port_id[0]);
+
+		qlt_update_host_map(vha, id);
+
+	} else if (rptid_entry->format == 1) {
+		/* fabric */
+		ql_dbg(ql_dbg_async, vha, 0x10b9,
+		    "Format 1: VP[%d] enabled - status %d - with "
+		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
+			rptid_entry->vp_status,
+		    rptid_entry->port_id[2], rptid_entry->port_id[1],
+		    rptid_entry->port_id[0]);
+
+		/* buffer to buffer credit flag */
+		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
+
+		if (rptid_entry->vp_idx == 0) {
+			if (rptid_entry->vp_status == VP_STAT_COMPL) {
+				/* FA-WWN is only for physical port */
+				if (qla_ini_mode_enabled(vha) &&
+				    ha->flags.fawwpn_enabled &&
+				    (rptid_entry->u.f1.flags &
+				     BIT_6)) {
+					memcpy(vha->port_name,
+					    rptid_entry->u.f1.port_name,
+					    WWN_SIZE);
+				}
+
+				qlt_update_host_map(vha, id);
+			}
+
+			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+		} else {
+			if (rptid_entry->vp_status != VP_STAT_COMPL &&
+				rptid_entry->vp_status != VP_STAT_ID_CHG) {
+				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
+				    "Could not acquire ID for VP[%d].\n",
+				    rptid_entry->vp_idx);
+				return;
+			}
+
+			found = 0;
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			list_for_each_entry(vp, &ha->vp_list, list) {
+				if (rptid_entry->vp_idx == vp->vp_idx) {
+					found = 1;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+			if (!found)
+				return;
+
+			qlt_update_host_map(vp, id);
+
+			/*
+			 * Cannot configure here as we are still sitting on the
+			 * response queue. Handle it in dpc context.
+			 */
+			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
+			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+		}
+		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+	} else if (rptid_entry->format == 2) {
+		ql_dbg(ql_dbg_async, vha, 0x505f,
+		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
+		    rptid_entry->port_id[2], rptid_entry->port_id[1],
+		    rptid_entry->port_id[0]);
+
+		ql_dbg(ql_dbg_async, vha, 0x5075,
+		    "N2N: Remote WWPN %8phC.\n",
+		    rptid_entry->u.f2.port_name);
+
+		/* N2N direct connect. */
+		vha->d_id.b.domain = rptid_entry->port_id[2];
+		vha->d_id.b.area = rptid_entry->port_id[1];
+		vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		qlt_update_vp_map(vha, SET_AL_PA);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+	}
+}
+
+/*
+ * qla24xx_modify_vp_config
+ *	Change VP configuration for vha
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *
+ * Returns:
+ *	qla2xxx local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla24xx_modify_vp_config(scsi_qla_host_t *vha)
+{
+	int		rval;
+	struct vp_config_entry_24xx *vpmod;
+	dma_addr_t	vpmod_dma;
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	/* This can be called by the parent */
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+	    "Entered %s.\n", __func__);
+
+	vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+	if (!vpmod) {
+		ql_log(ql_log_warn, vha, 0x10bc,
+		    "Failed to allocate modify VP IOCB.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
+	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
+	vpmod->entry_count = 1;
+	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
+	vpmod->vp_count = 1;
+	vpmod->vp_index1 = vha->vp_idx;
+	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+	qlt_modify_vp_config(vha, vpmod);
+
+	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
+	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
+
+	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
+		    "Failed to issue VP config IOCB (%x).\n", rval);
+	} else if (vpmod->entry_status != 0) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10be,
+		    "Failed to complete IOCB -- error status (%x).\n",
+		    vpmod->entry_status);
+		rval = QLA_FUNCTION_FAILED;
+	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
+		    "Failed to complete IOCB -- completion status (%x).\n",
+		    le16_to_cpu(vpmod->comp_status));
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		/* EMPTY */
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+		    "Done %s.\n", __func__);
+		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
+	}
+	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
+
+	return rval;
+}
+
+/*
+ * qla24xx_control_vp
+ *	Enable or disable a virtual port for the given host
+ *
+ * Input:
+ *	vha = virtual adapter block pointer.
+ *	cmd = VP control command
+ *
+ * Returns:
+ *	qla2xxx local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+	int		rval;
+	int		map, pos;
+	struct vp_ctrl_entry_24xx   *vce;
+	dma_addr_t	vce_dma;
+	struct qla_hw_data *ha = vha->hw;
+	int	vp_index = vha->vp_idx;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
+	    "Entered %s enabling index %d.\n", __func__, vp_index);
+
+	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+		return QLA_PARAMETER_ERROR;
+
+	vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
+	if (!vce) {
+		ql_log(ql_log_warn, vha, 0x10c2,
+		    "Failed to allocate VP control IOCB.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+	memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
+
+	vce->entry_type = VP_CTRL_IOCB_TYPE;
+	vce->entry_count = 1;
+	vce->command = cpu_to_le16(cmd);
+	vce->vp_count = cpu_to_le16(1);
+
+	/* The index map in firmware starts at 1, so decrement the index.
+	 * This is safe because index 0 (the physical port) is never used
+	 * here.
+	 */
+	map = (vp_index - 1) / 8;
+	pos = (vp_index - 1) & 7;
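+	/* e.g. vp_index 9 lands in byte 1, bit 0 of vp_idx_map:
+	 * (9 - 1) / 8 == 1 and (9 - 1) & 7 == 0.
+	 */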
+	mutex_lock(&ha->vport_lock);
+	vce->vp_idx_map[map] |= 1 << pos;
+	mutex_unlock(&ha->vport_lock);
+
+	rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10c3,
+		    "Failed to issue VP control IOCB (%x).\n", rval);
+	} else if (vce->entry_status != 0) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10c4,
+		    "Failed to complete IOCB -- error status (%x).\n",
+		    vce->entry_status);
+		rval = QLA_FUNCTION_FAILED;
+	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10c5,
+		    "Failed to complete IOCB -- completion status (%x).\n",
+		    le16_to_cpu(vce->comp_status));
+		rval = QLA_FUNCTION_FAILED;
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_pool_free(ha->s_dma_pool, vce, vce_dma);
+
+	return rval;
+}
+
+/*
+ * qla2x00_send_change_request
+ *	Enable or disable receiving RSCN requests from the fabric controller
+ *
+ * Input:
+ *	vha = adapter block pointer
+ *	format = registration format:
+ *		0 - Reserved
+ *		1 - Fabric detected registration
+ *		2 - N_port detected registration
+ *		3 - Full registration
+ *		FF - clear registration
+ *	vp_idx = Virtual port index
+ *
+ * Returns:
+ *	qla2x00 local function return status code.
+ *
+ * Context:
+ *	Kernel Context
+ */
+int
+qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
+			    uint16_t vp_idx)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
+	mcp->mb[1] = format;
+	mcp->mb[9] = vp_idx;
+	mcp->out_mb = MBX_9|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0|MBX_1;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval == QLA_SUCCESS) {
+		if (mcp->mb[0] != MBS_COMMAND_COMPLETE)
+			rval = BIT_1;
+	} else {
+		rval = BIT_1;
+	}
+
+	return rval;
+}
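+
+/*
+ * Example (illustrative only): request full registration (format 3
+ * above) on the physical port so the fabric delivers RSCNs:
+ *
+ *	if (qla2x00_send_change_request(vha, 0x3, 0) != QLA_SUCCESS)
+ *		; // RSCNs will not be delivered; fall back to rescans
+ */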
+
+int
+qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+    uint32_t size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+	    "Entered %s.\n", __func__);
+
+	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
+		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+		mcp->mb[8] = MSW(addr);
+		mcp->out_mb = MBX_8|MBX_0;
+	} else {
+		mcp->mb[0] = MBC_DUMP_RISC_RAM;
+		mcp->out_mb = MBX_0;
+	}
+	mcp->mb[1] = LSW(addr);
+	mcp->mb[2] = MSW(req_dma);
+	mcp->mb[3] = LSW(req_dma);
+	mcp->mb[6] = MSW(MSD(req_dma));
+	mcp->mb[7] = LSW(MSD(req_dma));
+	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
+	if (IS_FWI2_CAPABLE(vha->hw)) {
+		mcp->mb[4] = MSW(size);
+		mcp->mb[5] = LSW(size);
+		mcp->out_mb |= MBX_5|MBX_4;
+	} else {
+		mcp->mb[4] = LSW(size);
+		mcp->out_mb |= MBX_4;
+	}
+
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1008,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
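+
+/*
+ * Note on register packing (informational): mailbox registers are
+ * 16 bits wide, so 64-bit DMA addresses are split with the LSW/MSW/
+ * MSD macros from qla_def.h. For req_dma == 0x123456789abc:
+ *
+ *	LSW(req_dma)      == 0x9abc	(mb[3])
+ *	MSW(req_dma)      == 0x5678	(mb[2])
+ *	LSW(MSD(req_dma)) == 0x1234	(mb[7])
+ *	MSW(MSD(req_dma)) == 0x0000	(mb[6])
+ */
+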
+/* 84XX Support **************************************************************/
+
+struct cs84xx_mgmt_cmd {
+	union {
+		struct verify_chip_entry_84xx req;
+		struct verify_chip_rsp_84xx rsp;
+	} p;
+};
+
+int
+qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
+{
+	int rval, retry;
+	struct cs84xx_mgmt_cmd *mn;
+	dma_addr_t mn_dma;
+	uint16_t options;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+	    "Entered %s.\n", __func__);
+
+	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+	if (mn == NULL)
+		return QLA_MEMORY_ALLOC_FAILED;
+
+	/* Force Update? */
+	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
+	/* Diagnostic firmware? */
+	/* options |= MENLO_DIAG_FW; */
+	/* We update the firmware with only one data sequence. */
+	options |= VCO_END_OF_DATA;
+
+	do {
+		retry = 0;
+		memset(mn, 0, sizeof(*mn));
+		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
+		mn->p.req.entry_count = 1;
+		mn->p.req.options = cpu_to_le16(options);
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
+		    "Dump of Verify Request.\n");
+		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
+		    (uint8_t *)mn, sizeof(*mn));
+
+		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
+		if (rval != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
+			    "Failed to issue verify IOCB (%x).\n", rval);
+			goto verify_done;
+		}
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
+		    "Dump of Verify Response.\n");
+		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
+		    (uint8_t *)mn, sizeof(*mn));
+
+		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
+		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
+		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
+		    "cs=%x fc=%x.\n", status[0], status[1]);
+
+		if (status[0] != CS_COMPLETE) {
+			rval = QLA_FUNCTION_FAILED;
+			if (!(options & VCO_DONT_UPDATE_FW)) {
+				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
+				    "Firmware update failed. Retrying "
+				    "without update firmware.\n");
+				options |= VCO_DONT_UPDATE_FW;
+				options &= ~VCO_FORCE_UPDATE;
+				retry = 1;
+			}
+		} else {
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
+			    "Firmware updated to %x.\n",
+			    le32_to_cpu(mn->p.rsp.fw_ver));
+
+			/* NOTE: we only update OP firmware. */
+			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
+			ha->cs84xx->op_fw_version =
+			    le32_to_cpu(mn->p.rsp.fw_ver);
+			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
+			    flags);
+		}
+	} while (retry);
+
+verify_done:
+	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+		    "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+	int rval;
+	unsigned long flags;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+	    "Entered %s.\n", __func__);
+
+	if (IS_SHADOW_REG_CAPABLE(ha))
+		req->options |= BIT_13;
+
+	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+	mcp->mb[1] = req->options;
+	mcp->mb[2] = MSW(LSD(req->dma));
+	mcp->mb[3] = LSW(LSD(req->dma));
+	mcp->mb[6] = MSW(MSD(req->dma));
+	mcp->mb[7] = LSW(MSD(req->dma));
+	mcp->mb[5] = req->length;
+	if (req->rsp)
+		mcp->mb[10] = req->rsp->id;
+	mcp->mb[12] = req->qos;
+	mcp->mb[11] = req->vp_idx;
+	mcp->mb[13] = req->rid;
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mcp->mb[15] = 0;
+
+	mcp->mb[4] = req->id;
+	/* queue in-pointer index */
+	mcp->mb[8] = 0;
+	/* queue out-pointer index */
+	mcp->mb[9] = *req->out_ptr = 0;
+	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
+			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS * 2;
+
+	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mcp->in_mb |= MBX_1;
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		mcp->out_mb |= MBX_15;
+		/* debug queue-create issue seen with SR-IOV */
+		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (!(req->options & BIT_0)) {
+		WRT_REG_DWORD(req->req_q_in, 0);
+		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+			WRT_REG_DWORD(req->req_q_out, 0);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+	int rval;
+	unsigned long flags;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+	    "Entered %s.\n", __func__);
+
+	if (IS_SHADOW_REG_CAPABLE(ha))
+		rsp->options |= BIT_13;
+
+	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+	mcp->mb[1] = rsp->options;
+	mcp->mb[2] = MSW(LSD(rsp->dma));
+	mcp->mb[3] = LSW(LSD(rsp->dma));
+	mcp->mb[6] = MSW(MSD(rsp->dma));
+	mcp->mb[7] = LSW(MSD(rsp->dma));
+	mcp->mb[5] = rsp->length;
+	mcp->mb[14] = rsp->msix->entry;
+	mcp->mb[13] = rsp->rid;
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mcp->mb[15] = 0;
+
+	mcp->mb[4] = rsp->id;
+	/* queue in-pointer index */
+	mcp->mb[8] = *rsp->in_ptr = 0;
+	/* queue out-pointer index */
+	mcp->mb[9] = 0;
+	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
+			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS * 2;
+
+	if (IS_QLA81XX(ha)) {
+		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+		mcp->in_mb |= MBX_1;
+	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
+		mcp->in_mb |= MBX_1;
+		/* debug queue-create issue seen with SR-IOV */
+		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (!(rsp->options & BIT_0)) {
+		WRT_REG_DWORD(rsp->rsp_q_out, 0);
+		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+			WRT_REG_DWORD(rsp->rsp_q_in, 0);
+	}
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_IDC_ACK;
+	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
+	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10da,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+		    "Done %s.\n", __func__);
+		*sector_size = mcp->mb[1];
+	}
+
+	return rval;
+}
+
+int
+qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
+	    FAC_OPT_CMD_WRITE_PROTECT;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
+	mcp->mb[2] = LSW(start);
+	mcp->mb[3] = MSW(start);
+	mcp->mb[4] = LSW(finish);
+	mcp->mb[5] = MSW(finish);
+	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
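+
+/*
+ * Example (illustrative sketch of how the flash-update path pairs
+ * these FAC helpers; start/finish are caller-supplied flash
+ * addresses): enable writes, erase the sector range, then restore
+ * write protection:
+ *
+ *	if (qla81xx_fac_do_write_enable(vha, 1) == QLA_SUCCESS) {
+ *		qla81xx_fac_erase_sector(vha, start, finish);
+ *		qla81xx_fac_do_write_enable(vha, 0);
+ *	}
+ */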
+
+int
+qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_RESTART_MPI_FW;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_0|MBX_1;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int i;
+	int len;
+	uint16_t *str;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_P3P_TYPE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
+	    "Entered %s.\n", __func__);
+
+	str = (void *)version;
+	len = strlen(version);
+
+	mcp->mb[0] = MBC_SET_RNID_PARAMS;
+	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
+	mcp->out_mb = MBX_1|MBX_0;
+	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
+		mcp->mb[i] = cpu_to_le16p(str);
+		mcp->out_mb |= 1<<i;
+	}
+	for (; i < 16; i++) {
+		mcp->mb[i] = 0;
+		mcp->out_mb |= 1<<i;
+	}
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x117c,
+		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int len;
+	uint16_t dwlen;
+	uint8_t *str;
+	dma_addr_t str_dma;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
+	    IS_P3P_TYPE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
+	    "Entered %s.\n", __func__);
+
+	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+	if (!str) {
+		ql_log(ql_log_warn, vha, 0x117f,
+		    "Failed to allocate driver version param.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	memcpy(str, "\x7\x3\x11\x0", 4);
+	dwlen = str[0];
+	len = dwlen * 4 - 4;
+	memset(str + 4, 0, len);
+	if (len > strlen(version))
+		len = strlen(version);
+	memcpy(str + 4, version, len);
+
+	mcp->mb[0] = MBC_SET_RNID_PARAMS;
+	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+	mcp->mb[2] = MSW(LSD(str_dma));
+	mcp->mb[3] = LSW(LSD(str_dma));
+	mcp->mb[6] = MSW(MSD(str_dma));
+	mcp->mb[7] = LSW(MSD(str_dma));
+	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1180,
+		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+	return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_RNID_PARAMS;
+	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x115a,
+		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		/* Only report the temperature on success. */
+		*temp = mcp->mb[1];
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
+	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	if (len == 1)
+		opt |= BIT_0;
+
+	mcp->mb[0] = MBC_READ_SFP;
+	mcp->mb[1] = dev;
+	mcp->mb[2] = MSW(sfp_dma);
+	mcp->mb[3] = LSW(sfp_dma);
+	mcp->mb[6] = MSW(MSD(sfp_dma));
+	mcp->mb[7] = LSW(MSD(sfp_dma));
+	mcp->mb[8] = len;
+	mcp->mb[9] = off;
+	mcp->mb[10] = opt;
+	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (opt & BIT_0)
+		*sfp = mcp->mb[1];
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+		if (mcp->mb[0] == MBS_COMMAND_ERROR &&
+		    mcp->mb[1] == 0x22)
+			/* sfp is not there */
+			rval = QLA_INTERFACE_ERROR;
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
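+
+/*
+ * Example (illustrative): single-byte read. With len == 1 the byte
+ * comes back in mb[1] rather than via DMA, so no DMA buffer is
+ * needed; this mirrors the thermal read in qla2x00_get_thermal_temp()
+ * later in this file:
+ *
+ *	uint8_t byte;
+ *
+ *	rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1,
+ *	    BIT_13|BIT_0);
+ */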
+
+int
+qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
+	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	if (len == 1)
+		opt |= BIT_0;
+
+	if (opt & BIT_0)
+		len = *sfp;
+
+	mcp->mb[0] = MBC_WRITE_SFP;
+	mcp->mb[1] = dev;
+	mcp->mb[2] = MSW(sfp_dma);
+	mcp->mb[3] = LSW(sfp_dma);
+	mcp->mb[6] = MSW(MSD(sfp_dma));
+	mcp->mb[7] = LSW(MSD(sfp_dma));
+	mcp->mb[8] = len;
+	mcp->mb[9] = off;
+	mcp->mb[10] = opt;
+	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
+    uint16_t size_in_bytes, uint16_t *actual_size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_CNA_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_GET_XGMAC_STATS;
+	mcp->mb[2] = MSW(stats_dma);
+	mcp->mb[3] = LSW(stats_dma);
+	mcp->mb[6] = MSW(MSD(stats_dma));
+	mcp->mb[7] = LSW(MSD(stats_dma));
+	mcp->mb[8] = size_in_bytes >> 2;
+	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+		    "Done %s.\n", __func__);
+
+		*actual_size = mcp->mb[2] << 2;
+	}
+
+	return rval;
+}
+
+int
+qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
+    uint16_t size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_CNA_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
+	mcp->mb[1] = 0;
+	mcp->mb[2] = MSW(tlv_dma);
+	mcp->mb[3] = LSW(tlv_dma);
+	mcp->mb[6] = MSW(MSD(tlv_dma));
+	mcp->mb[7] = LSW(MSD(tlv_dma));
+	mcp->mb[8] = size;
+	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
+	mcp->mb[1] = LSW(risc_addr);
+	mcp->mb[8] = MSW(risc_addr);
+	mcp->out_mb = MBX_8|MBX_1|MBX_0;
+	mcp->in_mb = MBX_3|MBX_2|MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+		    "Done %s.\n", __func__);
+		*data = mcp->mb[3] << 16 | mcp->mb[2];
+	}
+
+	return rval;
+}
+
+int
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+	uint16_t *mresp)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
+	/* BIT_6 specifies 64-bit addressing */
+	mcp->mb[1] = mreq->options | BIT_6;
+
+	/* transfer count */
+	mcp->mb[10] = LSW(mreq->transfer_size);
+	mcp->mb[11] = MSW(mreq->transfer_size);
+
+	/* send data address */
+	mcp->mb[14] = LSW(mreq->send_dma);
+	mcp->mb[15] = MSW(mreq->send_dma);
+	mcp->mb[20] = LSW(MSD(mreq->send_dma));
+	mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+	/* receive data address */
+	mcp->mb[16] = LSW(mreq->rcv_dma);
+	mcp->mb[17] = MSW(mreq->rcv_dma);
+	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+	/* Iteration count */
+	mcp->mb[18] = LSW(mreq->iteration_count);
+	mcp->mb[19] = MSW(mreq->iteration_count);
+
+	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
+	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+	if (IS_CNA_CAPABLE(vha->hw))
+		mcp->out_mb |= MBX_2;
+	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
+
+	mcp->buf_size = mreq->transfer_size;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
+		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+		    "Done %s.\n", __func__);
+	}
+
+	/* Copy mailbox information */
+	memcpy(mresp, mcp->mb, 64);
+	return rval;
+}
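+
+/*
+ * Example (illustrative sketch, after the BSG caller in qla_bsg.c;
+ * len, send_dma and rcv_dma are assumed to be prepared and DMA-mapped
+ * by the caller): fill struct msg_echo_lb before invoking the test;
+ * rsp_mb receives the 32 returned mailbox registers.
+ *
+ *	struct msg_echo_lb elreq;
+ *	uint16_t rsp_mb[MAILBOX_REGISTER_COUNT];
+ *
+ *	elreq.options = 0;
+ *	elreq.transfer_size = len;
+ *	elreq.iteration_count = 1;
+ *	elreq.send_dma = send_dma;
+ *	elreq.rcv_dma = rcv_dma;
+ *	rval = qla2x00_loopback_test(vha, &elreq, rsp_mb);
+ */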
+
+int
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
+	uint16_t *mresp)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
+	/* BIT_6 specifies 64-bit addressing */
+	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
+	if (IS_CNA_CAPABLE(ha))
+		mcp->mb[2] = vha->fcoe_fcf_idx;
+	mcp->mb[16] = LSW(mreq->rcv_dma);
+	mcp->mb[17] = MSW(mreq->rcv_dma);
+	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+	mcp->mb[10] = LSW(mreq->transfer_size);
+
+	mcp->mb[14] = LSW(mreq->send_dma);
+	mcp->mb[15] = MSW(mreq->send_dma);
+	mcp->mb[20] = LSW(MSD(mreq->send_dma));
+	mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
+	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+	if (IS_CNA_CAPABLE(ha))
+		mcp->out_mb |= MBX_2;
+
+	mcp->in_mb = MBX_0;
+	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+		mcp->in_mb |= MBX_1;
+	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+		mcp->in_mb |= MBX_3;
+
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+	mcp->buf_size = mreq->transfer_size;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+		    "Done %s.\n", __func__);
+	}
+
+	/* Copy mailbox information */
+	memcpy(mresp, mcp->mb, 64);
+	return rval;
+}
+
+int
+qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
+	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
+
+	mcp->mb[0] = MBC_ISP84XX_RESET;
+	mcp->mb[1] = enable_diagnostic;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS)
+		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
+	else
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+		    "Done %s.\n", __func__);
+
+	return rval;
+}
+
+int
+qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
+	mcp->mb[1] = LSW(risc_addr);
+	mcp->mb[2] = LSW(data);
+	mcp->mb[3] = MSW(data);
+	mcp->mb[8] = MSW(risc_addr);
+	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1101,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
+{
+	int rval;
+	uint32_t stat, timer;
+	uint16_t mb0 = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	rval = QLA_SUCCESS;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+	    "Entered %s.\n", __func__);
+
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+	/* Write the MBC data to the registers */
+	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+	WRT_REG_WORD(&reg->mailbox1, mb[0]);
+	WRT_REG_WORD(&reg->mailbox2, mb[1]);
+	WRT_REG_WORD(&reg->mailbox3, mb[2]);
+	WRT_REG_WORD(&reg->mailbox4, mb[3]);
+
+	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+	/* Poll for MBC interrupt */
+	for (timer = 6000000; timer; timer--) {
+		/* Check for pending interrupts. */
+		stat = RD_REG_DWORD(&reg->host_status);
+		if (stat & HSRX_RISC_INT) {
+			stat &= 0xff;
+
+			if (stat == 0x1 || stat == 0x2 ||
+			    stat == 0x10 || stat == 0x11) {
+				set_bit(MBX_INTERRUPT,
+				    &ha->mbx_cmd_flags);
+				mb0 = RD_REG_WORD(&reg->mailbox0);
+				WRT_REG_DWORD(&reg->hccr,
+				    HCCRX_CLR_RISC_INT);
+				RD_REG_DWORD(&reg->hccr);
+				break;
+			}
+		}
+		udelay(5);
+	}
+
+	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
+		rval = mb0 & MBS_MASK;
+	else
+		rval = QLA_FUNCTION_FAILED;
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1104,
+		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_get_data_rate(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_DATA_RATE;
+	mcp->mb[1] = 0;
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		mcp->in_mb |= MBX_3;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1107,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+		    "Done %s.\n", __func__);
+		if (mcp->mb[1] != 0x7)
+			ha->link_data_rate = mcp->mb[1];
+	}
+
+	return rval;
+}
+
+int
+qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+	    !IS_QLA27XX(ha))
+		return QLA_FUNCTION_FAILED;
+	mcp->mb[0] = MBC_GET_PORT_CONFIG;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x110a,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		/* Copy all bits to preserve original value */
+		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
+
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+int
+qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SET_PORT_CONFIG;
+	/* Copy all bits to preserve original setting */
+	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
+	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x110d,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
+		uint16_t *mb)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+		return QLA_FUNCTION_FAILED;
+
+	mcp->mb[0] = MBC_PORT_PARAMS;
+	mcp->mb[1] = loop_id;
+	if (ha->flags.fcp_prio_enabled)
+		mcp->mb[2] = BIT_1;
+	else
+		mcp->mb[2] = BIT_2;
+	mcp->mb[4] = priority & 0xf;
+	mcp->mb[9] = vha->vp_idx;
+	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (mb != NULL) {
+		mb[0] = mcp->mb[0];
+		mb[1] = mcp->mb[1];
+		mb[3] = mcp->mb[3];
+		mb[4] = mcp->mb[4];
+	}
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t byte;
+
+	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1150,
+		    "Thermal not supported by this card.\n");
+		return rval;
+	}
+
+	if (IS_QLA25XX(ha)) {
+		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+		    ha->pdev->subsystem_device == 0x0175) {
+			rval = qla2x00_read_sfp(vha, 0, &byte,
+			    0x98, 0x1, 1, BIT_13|BIT_0);
+			*temp = byte;
+			return rval;
+		}
+		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+		    ha->pdev->subsystem_device == 0x338e) {
+			rval = qla2x00_read_sfp(vha, 0, &byte,
+			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
+			*temp = byte;
+			return rval;
+		}
+		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
+		    "Thermal not supported by this card.\n");
+		return rval;
+	}
+
+	if (IS_QLA82XX(ha)) {
+		*temp = qla82xx_read_temperature(vha);
+		rval = QLA_SUCCESS;
+		return rval;
+	} else if (IS_QLA8044(ha)) {
+		*temp = qla8044_read_temperature(vha);
+		rval = QLA_SUCCESS;
+		return rval;
+	}
+
+	rval = qla2x00_read_asic_temperature(vha, temp);
+	return rval;
+}
+
+int
+qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_FWI2_CAPABLE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
+	mcp->mb[1] = 1;
+
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1016,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+	    "Entered %s.\n", __func__);
+
+	if (!IS_P3P_TYPE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
+	mcp->mb[1] = 0;
+
+	mcp->out_mb = MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x100c,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla82xx_md_get_template_size(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int rval = QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
+	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
+
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	/* Always copy back return mailbox values. */
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1120,
+		    "mailbox command FAILED=0x%x, subcode=%x.\n",
+		    (mcp->mb[1] << 16) | mcp->mb[0],
+		    (mcp->mb[3] << 16) | mcp->mb[2]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+		    "Done %s.\n", __func__);
+		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
+		if (!ha->md_template_size) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1122,
+			    "Null template size obtained.\n");
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+	return rval;
+}
+
+int
+qla82xx_md_get_template(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int rval = QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+	    "Entered %s.\n", __func__);
+
+	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+	if (!ha->md_tmplt_hdr) {
+		ql_log(ql_log_warn, vha, 0x1124,
+		    "Unable to allocate memory for Minidump template.\n");
+		return rval;
+	}
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+	mcp->mb[2] = LSW(RQST_TMPLT);
+	mcp->mb[3] = MSW(RQST_TMPLT);
+	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
+	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
+	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
+	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
+	mcp->mb[8] = LSW(ha->md_template_size);
+	mcp->mb[9] = MSW(ha->md_template_size);
+
+	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1125,
+		    "mailbox command FAILED=0x%x, subcode=%x.\n",
+		    ((mcp->mb[1] << 16) | mcp->mb[0]),
+		    ((mcp->mb[3] << 16) | mcp->mb[2]));
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+int
+qla8044_md_get_template(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	int rval = QLA_FUNCTION_FAILED;
+	int offset = 0, size = MINIDUMP_SIZE_36K;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
+	    "Entered %s.\n", __func__);
+
+	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+	if (!ha->md_tmplt_hdr) {
+		ql_log(ql_log_warn, vha, 0xb11b,
+		    "Unable to allocate memory for Minidump template.\n");
+		return rval;
+	}
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	while (offset < ha->md_template_size) {
+		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+		mcp->mb[2] = LSW(RQST_TMPLT);
+		mcp->mb[3] = MSW(RQST_TMPLT);
+		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
+		mcp->mb[8] = LSW(size);
+		mcp->mb[9] = MSW(size);
+		/* 32-bit offset split across two 16-bit mailbox regs */
+		mcp->mb[10] = LSW(offset);
+		mcp->mb[11] = MSW(offset);
+		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+		mcp->tov = MBX_TOV_SECONDS;
+		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+		rval = qla2x00_mailbox_command(vha, mcp);
+
+		if (rval != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
+				"mailbox command FAILED=0x%x, subcode=%x.\n",
+				((mcp->mb[1] << 16) | mcp->mb[0]),
+				((mcp->mb[3] << 16) | mcp->mb[2]));
+			return rval;
+		} else {
+			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
+			    "Done %s.\n", __func__);
+		}
+		offset += size;
+	}
+	return rval;
+}
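+
+/*
+ * For example (illustrative, assuming MINIDUMP_SIZE_36K is 36 KB as
+ * the name suggests): a 72 KB template is pulled in two chunks at
+ * offsets 0 and 36864, each offset passed to firmware as a 32-bit
+ * value split across mb[10]/mb[11] with LSW()/MSW().
+ */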
+
+int
+qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_SET_LED_CONFIG;
+	mcp->mb[1] = led_cfg[0];
+	mcp->mb[2] = led_cfg[1];
+	if (IS_QLA8031(ha)) {
+		mcp->mb[3] = led_cfg[2];
+		mcp->mb[4] = led_cfg[3];
+		mcp->mb[5] = led_cfg[4];
+		mcp->mb[6] = led_cfg[5];
+	}
+
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA8031(ha))
+		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+	mcp->in_mb = MBX_0;
+	mcp->tov = 30;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1134,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_GET_LED_CONFIG;
+
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (IS_QLA8031(ha))
+		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
+	mcp->tov = 30;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1137,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		led_cfg[0] = mcp->mb[1];
+		led_cfg[1] = mcp->mb[2];
+		if (IS_QLA8031(ha)) {
+			led_cfg[2] = mcp->mb[3];
+			led_cfg[3] = mcp->mb[4];
+			led_cfg[4] = mcp->mb[5];
+			led_cfg[5] = mcp->mb[6];
+		}
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_P3P_TYPE(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
+		"Entered %s.\n", __func__);
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+	mcp->mb[0] = MBC_SET_LED_CONFIG;
+	if (enable)
+		mcp->mb[7] = 0xE;
+	else
+		mcp->mb[7] = 0xD;
+
+	mcp->out_mb = MBX_7|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1128,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
+	mcp->mb[1] = LSW(reg);
+	mcp->mb[2] = MSW(reg);
+	mcp->mb[3] = LSW(data);
+	mcp->mb[4] = MSW(data);
+	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1131,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
+		    "Implicit LOGO Unsupported.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+	    "Entering %s.\n", __func__);
+
+	/* Perform Implicit LOGO. */
+	mcp->mb[0] = MBC_PORT_LOGOUT;
+	mcp->mb[1] = fcport->loop_id;
+	mcp->mb[10] = BIT_15;
+	mcp->out_mb = MBX_10|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS)
+		ql_dbg(ql_dbg_mbx, vha, 0x113d,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	else
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+		    "Done %s.\n", __func__);
+
+	return rval;
+}
+
+int
+qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long retry_max_time = jiffies + (2 * HZ);
+
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
+
+retry_rd_reg:
+	mcp->mb[0] = MBC_READ_REMOTE_REG;
+	mcp->mb[1] = LSW(reg);
+	mcp->mb[2] = MSW(reg);
+	mcp->out_mb = MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x114c,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+	} else {
+		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
+		if (*data == QLA8XXX_BAD_VALUE) {
+			/*
+			 * During soft-reset CAMRAM register reads might
+			 * return 0xbad0bad0. So retry for MAX of 2 sec
+			 * while reading camram registers.
+			 */
+			if (time_after(jiffies, retry_max_time)) {
+				ql_dbg(ql_dbg_mbx, vha, 0x1141,
+				    "Failure to read CAMRAM register. "
+				    "data=0x%x.\n", *data);
+				return QLA_FUNCTION_FAILED;
+			}
+			msleep(100);
+			goto retry_rd_reg;
+		}
+		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
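+
+/*
+ * Example (illustrative; reg_addr is a caller-supplied remote
+ * register address): write a register via firmware and read it back
+ * with the retrying reader above:
+ *
+ *	uint32_t val = 0;
+ *
+ *	if (qla83xx_wr_reg(vha, reg_addr, 1) == QLA_SUCCESS)
+ *		rval = qla83xx_rd_reg(vha, reg_addr, &val);
+ */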
+
+int
+qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1144,
+		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1]);
+		ha->isp_ops->fw_dump(vha, 0);
+	} else {
+		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
+	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	uint8_t subcode = (uint8_t)options;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA8031(ha))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
+	mcp->mb[1] = options;
+	mcp->out_mb = MBX_1|MBX_0;
+	if (subcode & BIT_2) {
+		mcp->mb[2] = LSW(start_addr);
+		mcp->mb[3] = MSW(start_addr);
+		mcp->mb[4] = LSW(end_addr);
+		mcp->mb[5] = MSW(end_addr);
+		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
+	}
+	mcp->in_mb = MBX_2|MBX_1|MBX_0;
+	if (!(subcode & (BIT_2 | BIT_5)))
+		mcp->in_mb |= MBX_4|MBX_3;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1147,
+		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
+		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+		    mcp->mb[4]);
+		ha->isp_ops->fw_dump(vha, 0);
+	} else {
+		if (subcode & BIT_5)
+			*sector_size = mcp->mb[1];
+		else if (subcode & (BIT_6 | BIT_7)) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1148,
+			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
+		} else if (subcode & (BIT_3 | BIT_4)) {
+			ql_dbg(ql_dbg_mbx, vha, 0x1149,
+			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
+		}
+		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+	uint32_t size)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+
+	if (!IS_MCTP_CAPABLE(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+	mcp->mb[1] = LSW(addr);
+	mcp->mb[2] = MSW(req_dma);
+	mcp->mb[3] = LSW(req_dma);
+	mcp->mb[4] = MSW(size);
+	mcp->mb[5] = LSW(size);
+	mcp->mb[6] = MSW(MSD(req_dma));
+	mcp->mb[7] = LSW(MSD(req_dma));
+	mcp->mb[8] = MSW(addr);
+	/* Set RAM ID valid (BIT_7); for MCTP the RAM ID is 0x40. */
+	mcp->mb[10] = BIT_7 | 0x40;
+
+	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
+	    MBX_0;
+
+	mcp->in_mb = MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x114e,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+int
+qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
+	void *dd_buf, uint size, uint options)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	dma_addr_t dd_dma;
+
+	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+	    "Entered %s.\n", __func__);
+
+	dd_dma = dma_map_single(&vha->hw->pdev->dev,
+	    dd_buf, size, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
+		return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	memset(dd_buf, 0, size);
+
+	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+	mcp->mb[1] = options;
+	mcp->mb[2] = MSW(LSD(dd_dma));
+	mcp->mb[3] = LSW(LSD(dd_dma));
+	mcp->mb[6] = MSW(MSD(dd_dma));
+	mcp->mb[7] = LSW(MSD(dd_dma));
+	mcp->mb[8] = size;
+	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->buf_size = size;
+	mcp->flags = MBX_DMA_IN;
+	mcp->tov = MBX_TOV_SECONDS * 4;
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+		    "Done %s.\n", __func__);
+	}
+
+	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
+	    size, DMA_FROM_DEVICE);
+
+	return rval;
+}
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+	struct srb *sp = s;
+
+	sp->u.iocb_cmd.u.mbx.rc = res;
+
+	complete(&sp->u.iocb_cmd.u.mbx.comp);
+	/* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This routine uses the IOCB interface to send the mailbox command,
+ * which allows non-critical (non chip-setup) commands to go out in
+ * parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	srb_t *sp;
+	struct srb_iocb *c;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_MB_IOCB;
+	sp->name = mb_to_str(mcp->mb[0]);
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+	c = &sp->u.iocb_cmd;
+	c->timeout = qla2x00_async_iocb_timeout;
+	init_completion(&c->u.mbx.comp);
+
+	sp->done = qla2x00_async_mb_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1018,
+		    "%s: %s Failed submission. %x.\n",
+		    __func__, sp->name, rval);
+		goto done_free_sp;
+	}
+
+	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
+	    sp->name, sp->handle);
+
+	wait_for_completion(&c->u.mbx.comp);
+	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+	rval = c->u.mbx.rc;
+	switch (rval) {
+	case QLA_FUNCTION_TIMEOUT:
+		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
+		    __func__, sp->name, rval);
+		break;
+	case  QLA_SUCCESS:
+		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
+		    __func__, sp->name);
+		sp->free(sp);
+		break;
+	default:
+		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
+		    __func__, sp->name, rval);
+		sp->free(sp);
+		break;
+	}
+
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	return rval;
+}
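+
+/*
+ * qla24xx_gpdb_wait() below is the canonical caller: it fills an
+ * on-stack mbx_cmd_t, submits it through qla24xx_send_mb_cmd() and
+ * sleeps on the completion instead of polling mailbox registers.
+ */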
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	dma_addr_t pd_dma;
+	struct port_database_24xx *pd;
+	struct qla_hw_data *ha = vha->hw;
+	mbx_cmd_t mc;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+	if (pd == NULL) {
+		ql_log(ql_log_warn, vha, 0xd047,
+		    "Failed to allocate port database structure.\n");
+		goto done_free_sp;
+	}
+	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_PORT_DATABASE;
+	mc.mb[1] = cpu_to_le16(fcport->loop_id);
+	mc.mb[2] = MSW(pd_dma);
+	mc.mb[3] = LSW(pd_dma);
+	mc.mb[6] = MSW(MSD(pd_dma));
+	mc.mb[7] = LSW(MSD(pd_dma));
+	mc.mb[9] = cpu_to_le16(vha->vp_idx);
+	mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1193,
+		    "%s: %8phC fail\n", __func__, fcport->port_name);
+		goto done_free_sp;
+	}
+
+	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
+	    __func__, fcport->port_name);
+
+done_free_sp:
+	if (pd)
+		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+	return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+    struct port_database_24xx *pd)
+{
+	int rval = QLA_SUCCESS;
+	uint64_t zero = 0;
+	u8 current_login_state, last_login_state;
+
+	if (fcport->fc4f_nvme) {
+		current_login_state = pd->current_login_state >> 4;
+		last_login_state = pd->last_login_state >> 4;
+	} else {
+		current_login_state = pd->current_login_state & 0xf;
+		last_login_state = pd->last_login_state & 0xf;
+	}
+
+	/* Check for logged in state. */
+	if (current_login_state != PDS_PRLI_COMPLETE) {
+		ql_dbg(ql_dbg_mbx, vha, 0x119a,
+		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
+		    current_login_state, last_login_state, fcport->loop_id);
+		rval = QLA_FUNCTION_FAILED;
+		goto gpd_error_out;
+	}
+
+	if (fcport->loop_id == FC_NO_LOOP_ID ||
+	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+	     memcmp(fcport->port_name, pd->port_name, 8))) {
+		/* We lost the device mid way. */
+		rval = QLA_NOT_LOGGED_IN;
+		goto gpd_error_out;
+	}
+
+	/* Names are little-endian. */
+	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+	/* Get port_id of device. */
+	fcport->d_id.b.domain = pd->port_id[0];
+	fcport->d_id.b.area = pd->port_id[1];
+	fcport->d_id.b.al_pa = pd->port_id[2];
+	fcport->d_id.b.rsvd_1 = 0;
+
+	if (fcport->fc4f_nvme) {
+		fcport->nvme_prli_service_param =
+		    pd->prli_nvme_svc_param_word_3;
+		fcport->port_type = FCT_NVME;
+	} else {
+		/* If not target must be initiator or unknown type. */
+		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+			fcport->port_type = FCT_INITIATOR;
+		else
+			fcport->port_type = FCT_TARGET;
+	}
+	/* Passback COS information. */
+	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+		FC_COS_CLASS2 : FC_COS_CLASS3;
+
+	if (pd->prli_svc_param_word_3[0] & BIT_7) {
+		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+		fcport->conf_compl_supported = 1;
+	}
+
+gpd_error_out:
+	return rval;
+}
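+
+/*
+ * Note on the login-state parsing above: each login-state byte in the
+ * 24xx port database carries two 4-bit states, and the nibble selected
+ * depends on the FC-4 type being queried:
+ *
+ *	u8 state = pd->current_login_state;
+ *	u8 fcp_state  = state & 0xf;	// lower nibble: FCP login state
+ *	u8 nvme_state = state >> 4;	// upper nibble: NVMe login state
+ */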
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: Do not call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	mbx_cmd_t mc;
+
+	if (!vha->hw->flags.fw_started)
+		goto done;
+
+	memset(&mc, 0, sizeof(mc));
+	mc.mb[0] = MBC_GET_ID_LIST;
+	mc.mb[2] = MSW(id_list_dma);
+	mc.mb[3] = LSW(id_list_dma);
+	mc.mb[6] = MSW(MSD(id_list_dma));
+	mc.mb[7] = LSW(MSD(id_list_dma));
+	mc.mb[8] = 0;
+	mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+	rval = qla24xx_send_mb_cmd(vha, &mc);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x119b,
+		    "%s: fail\n", __func__);
+	} else {
+		*entries = mc.mb[1];
+		ql_dbg(ql_dbg_mbx, vha, 0x119c,
+		    "%s: done\n", __func__);
+	}
+done:
+	return rval;
+}
+
+int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+	mcp->mb[1] = cpu_to_le16(1);
+	mcp->mb[2] = cpu_to_le16(value);
+	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
+	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
+
+	return rval;
+}
+
+int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
+{
+	int rval;
+	mbx_cmd_t	mc;
+	mbx_cmd_t	*mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
+	    "Entered %s\n", __func__);
+
+	memset(mcp->mb, 0, sizeof(mcp->mb));
+	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+	mcp->mb[1] = cpu_to_le16(0);
+	mcp->out_mb = MBX_1 | MBX_0;
+	mcp->in_mb = MBX_2 | MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval == QLA_SUCCESS)
+		*value = mc.mb[2];
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
+	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
+
+	return rval;
+}
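+
+/*
+ * The two routines above share the MBC_GET_SET_ZIO_THRESHOLD mailbox
+ * command: mb[1] selects the operation (1 = set, 0 = get) and mb[2]
+ * carries the threshold. A hypothetical caller pairing them:
+ *
+ *	uint16_t zio;
+ *
+ *	if (qla27xx_get_zio_threshold(vha, &zio) == QLA_SUCCESS)
+ *		(void) qla27xx_set_zio_threshold(vha, zio);
+ */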
+
+int
+qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t iter, addr, offset;
+	dma_addr_t phys_addr;
+	int rval, c;
+	u8 *sfp_data;
+
+	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
+	addr = 0xa0;
+	phys_addr = ha->sfp_data_dma;
+	sfp_data = ha->sfp_data;
+	offset = c = 0;
+
+	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
+		if (iter == 4) {
+			/* Skip to next device address. */
+			addr = 0xa2;
+			offset = 0;
+		}
+
+		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
+		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x706d,
+			    "Unable to read SFP data (%x/%x/%x).\n", rval,
+			    addr, offset);
+
+			return rval;
+		}
+
+		if (buf && (c < count)) {
+			u16 sz;
+
+			if ((count - c) >= SFP_BLOCK_SIZE)
+				sz = SFP_BLOCK_SIZE;
+			else
+				sz = count - c;
+
+			memcpy(buf, sfp_data, sz);
+			buf += SFP_BLOCK_SIZE;
+			c += sz;
+		}
+		phys_addr += SFP_BLOCK_SIZE;
+		sfp_data  += SFP_BLOCK_SIZE;
+		offset += SFP_BLOCK_SIZE;
+	}
+
+	return rval;
+}
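+
+/*
+ * Reading note for qla2x00_read_sfp_dev(): the transceiver is read in
+ * SFP_BLOCK_SIZE chunks, starting at two-wire device address 0xa0 and
+ * switching to device address 0xa2 (offset rewound to 0) after the
+ * fourth block, which corresponds to the conventional A0h/A2h SFP
+ * EEPROM page split.
+ */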
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mid.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mid.c
new file mode 100644
index 0000000..375a88e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mid.c
@@ -0,0 +1,901 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_gbl.h"
+#include "qla_target.h"
+
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <linux/delay.h>
+
+void
+qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
+{
+	if (vha->vp_idx && vha->timer_active) {
+		del_timer_sync(&vha->timer);
+		vha->timer_active = 0;
+	}
+}
+
+static uint32_t
+qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
+{
+	uint32_t vp_id;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+
+	/* Find an empty slot and assign a vp_id */
+	mutex_lock(&ha->vport_lock);
+	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
+	if (vp_id > ha->max_npiv_vports) {
+		ql_dbg(ql_dbg_vport, vha, 0xa000,
+		    "vp_id %d is bigger than max-supported %d.\n",
+		    vp_id, ha->max_npiv_vports);
+		mutex_unlock(&ha->vport_lock);
+		return vp_id;
+	}
+
+	set_bit(vp_id, ha->vp_idx_map);
+	ha->num_vhosts++;
+	vha->vp_idx = vp_id;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_add_tail(&vha->list, &ha->vp_list);
+
+	qlt_update_vp_map(vha, SET_VP_IDX);
+
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	mutex_unlock(&ha->vport_lock);
+	return vp_id;
+}
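+
+/*
+ * vp_id allocation above is a plain bitmap allocator guarded by
+ * ha->vport_lock; stripped to its essentials (a sketch, not driver
+ * code; bit 0 belongs to the physical port):
+ *
+ *	id = find_first_zero_bit(map, max_vports + 1);
+ *	if (id <= max_vports)
+ *		set_bit(id, map);
+ */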
+
+void
+qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
+{
+	uint16_t vp_id;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
+
+	mutex_lock(&ha->vport_lock);
+	/*
+	 * Wait for all pending activities to finish before removing vport from
+	 * the list.
+	 * Lock needs to be held for safe removal from the list (it
+	 * ensures no active vp_list traversal while the vport is removed
+	 * from the queue)
+	 */
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+	    10*HZ);
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	if (atomic_read(&vha->vref_count)) {
+		ql_dbg(ql_dbg_vport, vha, 0xfffa,
+		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
+	}
+	list_del(&vha->list);
+	qlt_update_vp_map(vha, RESET_VP_IDX);
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+	vp_id = vha->vp_idx;
+	ha->num_vhosts--;
+	clear_bit(vp_id, ha->vp_idx_map);
+
+	mutex_unlock(&ha->vport_lock);
+}
+
+static scsi_qla_host_t *
+qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
+{
+	scsi_qla_host_t *vha;
+	struct scsi_qla_host *tvha;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	/* Locate matching device in database. */
+	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
+		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+			return vha;
+		}
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+	return NULL;
+}
+
+/*
+ * qla2x00_mark_vp_devices_dead
+ *	Updates fcport state when device goes offline.
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *
+ * Return:
+ *	None.
+ *
+ * Context:
+ */
+static void
+qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
+{
+	/*
+	 * !!! NOTE !!!
+	 * If this function is called from a context other than vport
+	 * create, disable, or delete, make sure the call is synchronized
+	 * with the vport delete thread.
+	 */
+	fc_port_t *fcport;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		ql_dbg(ql_dbg_vport, vha, 0xa001,
+		    "Marking port dead, loop_id=0x%04x : %x.\n",
+		    fcport->loop_id, fcport->vha->vp_idx);
+
+		qla2x00_mark_device_lost(vha, fcport, 0, 0);
+		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
+	}
+}
+
+int
+qla24xx_disable_vp(scsi_qla_host_t *vha)
+{
+	unsigned long flags;
+	int ret;
+	fc_port_t *fcport;
+
+	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->logout_on_delete = 0;
+
+	qla2x00_mark_all_devices_lost(vha, 0);
+
+	/* Remove port id from vp target map */
+	spin_lock_irqsave(&vha->hw->vport_slock, flags);
+	qlt_update_vp_map(vha, RESET_AL_PA);
+	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
+	qla2x00_mark_vp_devices_dead(vha);
+	atomic_set(&vha->vp_state, VP_FAILED);
+	vha->flags.management_server_logged_in = 0;
+	if (ret == QLA_SUCCESS) {
+		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
+	} else {
+		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		return -1;
+	}
+	return 0;
+}
+
+int
+qla24xx_enable_vp(scsi_qla_host_t *vha)
+{
+	int ret;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	/* Check if physical ha port is Up */
+	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN  ||
+		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
+		!(ha->current_topology & ISP_CFG_F)) {
+		vha->vp_err_state =  VP_ERR_PORTDWN;
+		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+		ql_dbg(ql_dbg_taskm, vha, 0x800b,
+		    "%s skip enable. loop_state %x topo %x\n",
+		    __func__, base_vha->loop_state.counter,
+		    ha->current_topology);
+
+		goto enable_failed;
+	}
+
+	/* Initialize the new vport unless it is a persistent port */
+	mutex_lock(&ha->vport_lock);
+	ret = qla24xx_modify_vp_config(vha);
+	mutex_unlock(&ha->vport_lock);
+
+	if (ret != QLA_SUCCESS) {
+		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+		goto enable_failed;
+	}
+
+	ql_dbg(ql_dbg_taskm, vha, 0x801a,
+	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
+	return 0;
+
+enable_failed:
+	ql_dbg(ql_dbg_taskm, vha, 0x801b,
+	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
+	return 1;
+}
+
+static void
+qla24xx_configure_vp(scsi_qla_host_t *vha)
+{
+	struct fc_vport *fc_vport;
+	int ret;
+
+	fc_vport = vha->fc_vport;
+
+	ql_dbg(ql_dbg_vport, vha, 0xa002,
+	    "%s: change request #3.\n", __func__);
+	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
+	if (ret != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_vport, vha, 0xa003,
+		    "Failed to enable receiving of RSCN requests: 0x%x.\n",
+		    ret);
+		return;
+	}
+
+	/* Corresponds to SCR enabled */
+	clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
+
+	vha->flags.online = 1;
+	if (qla24xx_configure_vhba(vha))
+		return;
+
+	atomic_set(&vha->vp_state, VP_ACTIVE);
+	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+void
+qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data *ha = rsp->hw;
+	int i = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vha, &ha->vp_list, list) {
+		if (vha->vp_idx) {
+			atomic_inc(&vha->vref_count);
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+			switch (mb[0]) {
+			case MBA_LIP_OCCURRED:
+			case MBA_LOOP_UP:
+			case MBA_LOOP_DOWN:
+			case MBA_LIP_RESET:
+			case MBA_POINT_TO_POINT:
+			case MBA_CHG_IN_CONNECTION:
+			case MBA_PORT_UPDATE:
+			case MBA_RSCN_UPDATE:
+				ql_dbg(ql_dbg_async, vha, 0x5024,
+				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
+				    i, *mb, vha);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			}
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			atomic_dec(&vha->vref_count);
+			wake_up(&vha->vref_waitq);
+		}
+		i++;
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
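+
+/*
+ * The traversal above follows the driver's convention for calling out
+ * of a vport_slock-protected walk of ha->vp_list: bump vref_count, drop
+ * the lock, do the work, then retake the lock, drop the reference and
+ * wake vref_waitq, which qla24xx_deallocate_vp_id() waits on before it
+ * unlinks a vport.
+ */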
+
+int
+qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
+{
+	/*
+	 * Physical port will do most of the abort and recovery work. We can
+	 * just treat it as a loop down
+	 */
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		qla2x00_mark_all_devices_lost(vha, 0);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	}
+
+	/*
+	 * To exclusively reset vport, we need to log it out first.  Note: this
+	 * control_vp can fail if ISP reset is already issued, this is
+	 * expected, as the vp would be already logged out due to ISP reset.
+	 */
+	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+
+	ql_dbg(ql_dbg_taskm, vha, 0x801d,
+	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
+	return qla24xx_enable_vp(vha);
+}
+
+static int
+qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
+
+	qla2x00_do_work(vha);
+
+	/* Check if Fw is ready to configure VP first */
+	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
+		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+			/* VP acquired. complete port configuration */
+			ql_dbg(ql_dbg_dpc, vha, 0x4014,
+			    "Configure VP scheduled.\n");
+			qla24xx_configure_vp(vha);
+			ql_dbg(ql_dbg_dpc, vha, 0x4015,
+			    "Configure VP end.\n");
+			return 0;
+		}
+	}
+
+	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
+		ql_dbg(ql_dbg_dpc, vha, 0x4016,
+		    "FCPort update scheduled.\n");
+		qla2x00_update_fcports(vha);
+		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+		ql_dbg(ql_dbg_dpc, vha, 0x4017,
+		    "FCPort update end.\n");
+	}
+
+	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
+
+		if (!vha->relogin_jif ||
+		    time_after_eq(jiffies, vha->relogin_jif)) {
+			vha->relogin_jif = jiffies + HZ;
+			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+			ql_dbg(ql_dbg_dpc, vha, 0x4018,
+			    "Relogin needed scheduled.\n");
+			qla2x00_relogin(vha);
+			ql_dbg(ql_dbg_dpc, vha, 0x4019,
+			    "Relogin needed end.\n");
+		}
+	}
+
+	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
+	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
+		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
+	}
+
+	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
+			ql_dbg(ql_dbg_dpc, vha, 0x401a,
+			    "Loop resync scheduled.\n");
+			qla2x00_loop_resync(vha);
+			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
+			ql_dbg(ql_dbg_dpc, vha, 0x401b,
+			    "Loop resync end.\n");
+		}
+	}
+
+	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
+	    "Exiting %s.\n", __func__);
+	return 0;
+}
+
+void
+qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *vp;
+	unsigned long flags = 0;
+
+	if (vha->vp_idx)
+		return;
+	if (list_empty(&ha->vp_list))
+		return;
+
+	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
+
+	if (!(ha->current_topology & ISP_CFG_F))
+		return;
+
+	spin_lock_irqsave(&ha->vport_slock, flags);
+	list_for_each_entry(vp, &ha->vp_list, list) {
+		if (vp->vp_idx) {
+			atomic_inc(&vp->vref_count);
+			spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+			qla2x00_do_dpc_vp(vp);
+
+			spin_lock_irqsave(&ha->vport_slock, flags);
+			atomic_dec(&vp->vref_count);
+		}
+	}
+	spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
+int
+qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
+{
+	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+	struct qla_hw_data *ha = base_vha->hw;
+	scsi_qla_host_t *vha;
+	uint8_t port_name[WWN_SIZE];
+
+	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
+		return VPCERR_UNSUPPORTED;
+
+	/* Check that the F/W and H/W support NPIV */
+	if (!ha->flags.npiv_supported)
+		return VPCERR_UNSUPPORTED;
+
+	/* Check whether an NPIV-capable switch is present */
+	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
+		return VPCERR_NO_FABRIC_SUPP;
+
+	/* Check for a unique WWPN */
+	u64_to_wwn(fc_vport->port_name, port_name);
+	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
+		return VPCERR_BAD_WWN;
+	vha = qla24xx_find_vhost_by_name(ha, port_name);
+	if (vha)
+		return VPCERR_BAD_WWN;
+
+	/* Check against the max number of supported NPIV ports */
+	if (ha->num_vhosts > ha->max_npiv_vports) {
+		ql_dbg(ql_dbg_vport, vha, 0xa004,
+		    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
+		    ha->num_vhosts, ha->max_npiv_vports);
+		return VPCERR_UNSUPPORTED;
+	}
+	return 0;
+}
+
+scsi_qla_host_t *
+qla24xx_create_vhost(struct fc_vport *fc_vport)
+{
+	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+	struct qla_hw_data *ha = base_vha->hw;
+	scsi_qla_host_t *vha;
+	struct scsi_host_template *sht = &qla2xxx_driver_template;
+	struct Scsi_Host *host;
+
+	vha = qla2x00_create_host(sht, ha);
+	if (!vha) {
+		ql_log(ql_log_warn, vha, 0xa005,
+		    "scsi_host_alloc() failed for vport.\n");
+		return NULL;
+	}
+
+	host = vha->host;
+	fc_vport->dd_data = vha;
+	/* New host info */
+	u64_to_wwn(fc_vport->node_name, vha->node_name);
+	u64_to_wwn(fc_vport->port_name, vha->port_name);
+
+	vha->fc_vport = fc_vport;
+	vha->device_flags = 0;
+	vha->vp_idx = qla24xx_allocate_vp_id(vha);
+	if (vha->vp_idx > ha->max_npiv_vports) {
+		ql_dbg(ql_dbg_vport, vha, 0xa006,
+		    "Couldn't allocate vp_id.\n");
+		goto create_vhost_failed;
+	}
+	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+
+	vha->dpc_flags = 0L;
+
+	/*
+	 * To fix the issue of processing a parent's RSCN for the vport before
+	 * its SCR is complete.
+	 */
+	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+
+	vha->req = base_vha->req;
+	host->can_queue = base_vha->req->length + 128;
+	host->cmd_per_lun = 3;
+	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
+		host->max_cmd_len = 32;
+	else
+		host->max_cmd_len = MAX_CMDSZ;
+	host->max_channel = MAX_BUSES - 1;
+	host->max_lun = ql2xmaxlun;
+	host->unique_id = host->host_no;
+	host->max_id = ha->max_fibre_devices;
+	host->transportt = qla2xxx_transport_vport_template;
+
+	ql_dbg(ql_dbg_vport, vha, 0xa007,
+	    "Detect vport hba %ld at address = %p.\n",
+	    vha->host_no, vha);
+
+	vha->flags.init_done = 1;
+
+	mutex_lock(&ha->vport_lock);
+	set_bit(vha->vp_idx, ha->vp_idx_map);
+	ha->cur_vport_count++;
+	mutex_unlock(&ha->vport_lock);
+
+	return vha;
+
+create_vhost_failed:
+	return NULL;
+}
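+
+/*
+ * Ordering note for qla24xx_create_vhost(): the vp_idx is allocated and
+ * the vport linked into ha->vp_list before the timer is started and the
+ * SCSI host limits are inherited from the base port; VP_SCR_NEEDED is
+ * set up front so a parent's RSCN is not processed for the vport before
+ * its own SCR completes.
+ */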
+
+static void
+qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t que_id = req->id;
+
+	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
+		sizeof(request_t), req->ring, req->dma);
+	req->ring = NULL;
+	req->dma = 0;
+	if (que_id) {
+		ha->req_q_map[que_id] = NULL;
+		mutex_lock(&ha->vport_lock);
+		clear_bit(que_id, ha->req_qid_map);
+		mutex_unlock(&ha->vport_lock);
+	}
+	kfree(req->outstanding_cmds);
+	kfree(req);
+	req = NULL;
+}
+
+static void
+qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t que_id = rsp->id;
+
+	if (rsp->msix && rsp->msix->have_irq) {
+		free_irq(rsp->msix->vector, rsp->msix->handle);
+		rsp->msix->have_irq = 0;
+		rsp->msix->in_use = 0;
+		rsp->msix->handle = NULL;
+	}
+	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
+		sizeof(response_t), rsp->ring, rsp->dma);
+	rsp->ring = NULL;
+	rsp->dma = 0;
+	if (que_id) {
+		ha->rsp_q_map[que_id] = NULL;
+		mutex_lock(&ha->vport_lock);
+		clear_bit(que_id, ha->rsp_qid_map);
+		mutex_unlock(&ha->vport_lock);
+	}
+	kfree(rsp);
+	rsp = NULL;
+}
+
+int
+qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+	int ret = QLA_SUCCESS;
+
+	if (req && vha->flags.qpairs_req_created) {
+		req->options |= BIT_0;
+		ret = qla25xx_init_req_que(vha, req);
+		if (ret != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+
+		qla25xx_free_req_que(vha, req);
+	}
+
+	return ret;
+}
+
+int
+qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+	int ret = QLA_SUCCESS;
+
+	if (rsp && vha->flags.qpairs_rsp_created) {
+		rsp->options |= BIT_0;
+		ret = qla25xx_init_rsp_que(vha, rsp);
+		if (ret != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+
+		qla25xx_free_rsp_que(vha, rsp);
+	}
+
+	return ret;
+}
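+
+/*
+ * Deletion in the two helpers above re-issues the corresponding
+ * queue-init mailbox command with BIT_0 set in the options word (the
+ * driver's queue-delete indication) before the host-side ring and
+ * qid-map slot are freed.
+ */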
+
+/* Delete all queues for a given vhost */
+int
+qla25xx_delete_queues(struct scsi_qla_host *vha)
+{
+	int cnt, ret = 0;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair, *tqpair;
+
+	if (ql2xmqsupport) {
+		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+		    qp_list_elem)
+			qla2xxx_delete_qpair(vha, qpair);
+	} else {
+		/* Delete request queues */
+		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+			req = ha->req_q_map[cnt];
+			if (req && test_bit(cnt, ha->req_qid_map)) {
+				ret = qla25xx_delete_req_que(vha, req);
+				if (ret != QLA_SUCCESS) {
+					ql_log(ql_log_warn, vha, 0x00ea,
+					    "Couldn't delete req que %d.\n",
+					    req->id);
+					return ret;
+				}
+			}
+		}
+
+		/* Delete response queues */
+		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+			rsp = ha->rsp_q_map[cnt];
+			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+				ret = qla25xx_delete_rsp_que(vha, rsp);
+				if (ret != QLA_SUCCESS) {
+					ql_log(ql_log_warn, vha, 0x00eb,
+					    "Couldn't delete rsp que %d.\n",
+					    rsp->id);
+					return ret;
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+int
+qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
+    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
+{
+	int ret = 0;
+	struct req_que *req = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+	uint16_t que_id = 0;
+	device_reg_t *reg;
+	uint32_t cnt;
+
+	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+	if (req == NULL) {
+		ql_log(ql_log_fatal, base_vha, 0x00d9,
+		    "Failed to allocate memory for request queue.\n");
+		goto failed;
+	}
+
+	req->length = REQUEST_ENTRY_CNT_24XX;
+	req->ring = dma_alloc_coherent(&ha->pdev->dev,
+			(req->length + 1) * sizeof(request_t),
+			&req->dma, GFP_KERNEL);
+	if (req->ring == NULL) {
+		ql_log(ql_log_fatal, base_vha, 0x00da,
+		    "Failed to allocate memory for request_ring.\n");
+		goto que_failed;
+	}
+
+	ret = qla2x00_alloc_outstanding_cmds(ha, req);
+	if (ret != QLA_SUCCESS)
+		goto que_failed;
+
+	mutex_lock(&ha->mq_lock);
+	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
+	if (que_id >= ha->max_req_queues) {
+		mutex_unlock(&ha->mq_lock);
+		ql_log(ql_log_warn, base_vha, 0x00db,
+		    "No resources to create additional request queue.\n");
+		goto que_failed;
+	}
+	set_bit(que_id, ha->req_qid_map);
+	ha->req_q_map[que_id] = req;
+	req->rid = rid;
+	req->vp_idx = vp_idx;
+	req->qos = qos;
+
+	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
+	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+	    que_id, req->rid, req->vp_idx, req->qos);
+	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
+	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+	    que_id, req->rid, req->vp_idx, req->qos);
+	if (rsp_que < 0)
+		req->rsp = NULL;
+	else
+		req->rsp = ha->rsp_q_map[rsp_que];
+	/* Use alternate PCI bus number */
+	if (MSB(req->rid))
+		options |= BIT_4;
+	/* Use alternate PCI devfn */
+	if (LSB(req->rid))
+		options |= BIT_5;
+	req->options = options;
+
+	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
+	    "options=0x%x.\n", req->options);
+	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
+	    "options=0x%x.\n", req->options);
+	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
+		req->outstanding_cmds[cnt] = NULL;
+	req->current_outstanding_cmd = 1;
+
+	req->ring_ptr = req->ring;
+	req->ring_index = 0;
+	req->cnt = req->length;
+	req->id = que_id;
+	reg = ISP_QUE_REG(ha, que_id);
+	req->req_q_in = &reg->isp25mq.req_q_in;
+	req->req_q_out = &reg->isp25mq.req_q_out;
+	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+	req->out_ptr = (void *)(req->ring + req->length);
+	mutex_unlock(&ha->mq_lock);
+	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
+	    "ring_ptr=%p ring_index=%d, "
+	    "cnt=%d id=%d max_q_depth=%d.\n",
+	    req->ring_ptr, req->ring_index,
+	    req->cnt, req->id, req->max_q_depth);
+	ql_dbg(ql_dbg_init, base_vha, 0x00de,
+	    "ring_ptr=%p ring_index=%d, "
+	    "cnt=%d id=%d max_q_depth=%d.\n",
+	    req->ring_ptr, req->ring_index, req->cnt,
+	    req->id, req->max_q_depth);
+
+	if (startqp) {
+		ret = qla25xx_init_req_que(base_vha, req);
+		if (ret != QLA_SUCCESS) {
+			ql_log(ql_log_fatal, base_vha, 0x00df,
+			    "%s failed.\n", __func__);
+			mutex_lock(&ha->mq_lock);
+			clear_bit(que_id, ha->req_qid_map);
+			mutex_unlock(&ha->mq_lock);
+			goto que_failed;
+		}
+		vha->flags.qpairs_req_created = 1;
+	}
+
+	return req->id;
+
+que_failed:
+	qla25xx_free_req_que(base_vha, req);
+failed:
+	return 0;
+}
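+
+/*
+ * Illustrative call (parameter values hypothetical): create a request
+ * queue on the base port, bind it to response queue 1 and start it
+ * immediately. Note that 0 is the failure return and a nonzero queue id
+ * means success:
+ *
+ *	int qid = qla25xx_create_req_que(ha, 0, 0, 0, 1, 0, true);
+ *	if (!qid)
+ *		// allocation or init failed
+ */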
+
+static void qla_do_work(struct work_struct *work)
+{
+	unsigned long flags;
+	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha = qpair->hw;
+	struct srb_iocb	*nvme, *nxt_nvme;
+
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	qla24xx_process_response_queue(vha, qpair->rsp);
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+	list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
+		    u.nvme.entry) {
+		list_del_init(&nvme->u.nvme.entry);
+		qla_nvme_cmpl_io(nvme);
+	}
+}
+
+/* create response queue */
+int
+qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
+    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
+{
+	int ret = 0;
+	struct rsp_que *rsp = NULL;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+	uint16_t que_id = 0;
+	device_reg_t *reg;
+
+	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+	if (rsp == NULL) {
+		ql_log(ql_log_warn, base_vha, 0x0066,
+		    "Failed to allocate memory for response queue.\n");
+		goto failed;
+	}
+
+	rsp->length = RESPONSE_ENTRY_CNT_MQ;
+	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
+			(rsp->length + 1) * sizeof(response_t),
+			&rsp->dma, GFP_KERNEL);
+	if (rsp->ring == NULL) {
+		ql_log(ql_log_warn, base_vha, 0x00e1,
+		    "Failed to allocate memory for response ring.\n");
+		goto que_failed;
+	}
+
+	mutex_lock(&ha->mq_lock);
+	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
+	if (que_id >= ha->max_rsp_queues) {
+		mutex_unlock(&ha->mq_lock);
+		ql_log(ql_log_warn, base_vha, 0x00e2,
+		    "No resources to create additional response queue.\n");
+		goto que_failed;
+	}
+	set_bit(que_id, ha->rsp_qid_map);
+
+	rsp->msix = qpair->msix;
+
+	ha->rsp_q_map[que_id] = rsp;
+	rsp->rid = rid;
+	rsp->vp_idx = vp_idx;
+	rsp->hw = ha;
+	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
+	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
+	/* Use alternate PCI bus number */
+	if (MSB(rsp->rid))
+		options |= BIT_4;
+	/* Use alternate PCI devfn */
+	if (LSB(rsp->rid))
+		options |= BIT_5;
+	/* Enable MSI-X handshake mode on non-NACK-capable adapters */
+	if (!IS_MSIX_NACK_CAPABLE(ha))
+		options |= BIT_6;
+
+	/* Set option to indicate response queue creation */
+	options |= BIT_1;
+
+	rsp->options = options;
+	rsp->id = que_id;
+	reg = ISP_QUE_REG(ha, que_id);
+	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
+	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+	mutex_unlock(&ha->mq_lock);
+	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
+	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
+	    rsp->options, rsp->id, rsp->rsp_q_in,
+	    rsp->rsp_q_out);
+	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
+	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
+	    rsp->options, rsp->id, rsp->rsp_q_in,
+	    rsp->rsp_q_out);
+
+	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
+	if (ret)
+		goto que_failed;
+
+	if (startqp) {
+		ret = qla25xx_init_rsp_que(base_vha, rsp);
+		if (ret != QLA_SUCCESS) {
+			ql_log(ql_log_fatal, base_vha, 0x00e7,
+			    "%s failed.\n", __func__);
+			mutex_lock(&ha->mq_lock);
+			clear_bit(que_id, ha->rsp_qid_map);
+			mutex_unlock(&ha->mq_lock);
+			goto que_failed;
+		}
+		vha->flags.qpairs_rsp_created = 1;
+	}
+	rsp->req = NULL;
+
+	qla2x00_init_response_q_entries(rsp);
+	if (qpair->hw->wq)
+		INIT_WORK(&qpair->q_work, qla_do_work);
+	return rsp->id;
+
+que_failed:
+	qla25xx_free_rsp_que(base_vha, rsp);
+failed:
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mr.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mr.c
new file mode 100644
index 0000000..e23a3d4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mr.c
@@ -0,0 +1,3460 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <linux/bsg-lib.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/utsname.h>
+
+
+/* QLAFX00 specific Mailbox implementation functions */
+
+/*
+ * qlafx00_mailbox_command
+ *	Issue mailbox command and waits for completion.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ *	0 : QLA_SUCCESS (cmd performed successfully)
+ *	1 : QLA_FUNCTION_FAILED (error encountered)
+ *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
+{
+	int		rval;
+	unsigned long    flags = 0;
+	device_reg_t *reg;
+	uint8_t		abort_active;
+	uint8_t		io_lock_on;
+	uint16_t	command = 0;
+	uint32_t	*iptr;
+	uint32_t __iomem *optr;
+	uint32_t	cnt;
+	uint32_t	mboxes;
+	unsigned long	wait_time;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	if (ha->pdev->error_state > pci_channel_io_frozen) {
+		ql_log(ql_log_warn, vha, 0x115c,
+		    "error_state is greater than pci_channel_io_frozen, "
+		    "exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_log(ql_log_warn, vha, 0x115f,
+		    "Device in failed state, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	reg = ha->iobase;
+	io_lock_on = base_vha->flags.init_done;
+
+	rval = QLA_SUCCESS;
+	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+	if (ha->flags.pci_channel_io_perm_failure) {
+		ql_log(ql_log_warn, vha, 0x1175,
+		    "Perm failure on EEH timeout MBX, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (ha->flags.isp82xx_fw_hung) {
+		/* Setting Link-Down error */
+		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+		ql_log(ql_log_warn, vha, 0x1176,
+		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+		rval = QLA_FUNCTION_FAILED;
+		goto premature_exit;
+	}
+
+	/*
+	 * Wait for active mailbox commands to finish by waiting at most tov
+	 * seconds. This is to serialize the actual issuing of mailbox cmds
+	 * during non-ISP-abort time.
+	 */
+	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+		/* Timeout occurred. Return error. */
+		ql_log(ql_log_warn, vha, 0x1177,
+		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
+		    mcp->mb[0]);
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	ha->flags.mbox_busy = 1;
+	/* Save mailbox command for debug */
+	ha->mcp32 = mcp;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1178,
+	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Load mailbox registers. */
+	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+
+	iptr = mcp->mb;
+	command = mcp->mb[0];
+	mboxes = mcp->out_mb;
+
+	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+		if (mboxes & BIT_0)
+			WRT_REG_DWORD(optr, *iptr);
+
+		mboxes >>= 1;
+		optr++;
+		iptr++;
+	}
+
+	/* Issue set host interrupt command to send cmd out. */
+	ha->flags.mbox_int = 0;
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
+	    (uint8_t *)mcp->mb, 16);
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
+	    ((uint8_t *)mcp->mb + 0x10), 16);
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
+	    ((uint8_t *)mcp->mb + 0x20), 8);
+
+	/* Unlock mbx registers and wait for interrupt */
+	ql_dbg(ql_dbg_mbx, vha, 0x1179,
+	    "Going to unlock irq & waiting for interrupts. "
+	    "jiffies=%lx.\n", jiffies);
+
+	/* Wait for mbx cmd completion until timeout */
+	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+	} else {
+		ql_dbg(ql_dbg_mbx, vha, 0x112c,
+		    "Cmd=%x Polling Mode.\n", command);
+
+		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+		while (!ha->flags.mbox_int) {
+			if (time_after(jiffies, wait_time))
+				break;
+
+			/* Check for pending interrupts. */
+			qla2x00_poll(ha->rsp_q_map[0]);
+
+			if (!ha->flags.mbox_int &&
+			    !(IS_QLA2200(ha) &&
+			    command == MBC_LOAD_RISC_RAM_EXTENDED))
+				usleep_range(10000, 11000);
+		} /* while */
+		ql_dbg(ql_dbg_mbx, vha, 0x112d,
+		    "Waited %d sec.\n",
+		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+	}
+
+	/* Check whether we timed out */
+	if (ha->flags.mbox_int) {
+		uint32_t *iptr2;
+
+		ql_dbg(ql_dbg_mbx, vha, 0x112e,
+		    "Cmd=%x completed.\n", command);
+
+		/* Got interrupt. Clear the flag. */
+		ha->flags.mbox_int = 0;
+		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
+			rval = QLA_FUNCTION_FAILED;
+
+		/* Load return mailbox registers. */
+		iptr2 = mcp->mb;
+		iptr = (uint32_t *)&ha->mailbox_out32[0];
+		mboxes = mcp->in_mb;
+		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+			if (mboxes & BIT_0)
+				*iptr2 = *iptr;
+
+			mboxes >>= 1;
+			iptr2++;
+			iptr++;
+		}
+	} else {
+		rval = QLA_FUNCTION_TIMEOUT;
+	}
+
+	ha->flags.mbox_busy = 0;
+
+	/* Clean up */
+	ha->mcp32 = NULL;
+
+	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x113a,
+		    "checking for additional resp interrupt.\n");
+
+		/* polling mode for non isp_abort commands. */
+		qla2x00_poll(ha->rsp_q_map[0]);
+	}
+
+	if (rval == QLA_FUNCTION_TIMEOUT &&
+	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+		    ha->flags.eeh_busy) {
+			/* not in dpc. schedule it for dpc to take over. */
+			ql_dbg(ql_dbg_mbx, vha, 0x115d,
+			    "Timeout, schedule isp_abort_needed.\n");
+
+			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+				ql_log(ql_log_info, base_vha, 0x115e,
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+				    "abort.\n", command, mcp->mb[0],
+				    ha->flags.eeh_busy);
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+		} else if (!abort_active) {
+			/* call abort directly since we are in the DPC thread */
+			ql_dbg(ql_dbg_mbx, vha, 0x1160,
+			    "Timeout, calling abort_isp.\n");
+
+			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+				ql_log(ql_log_info, base_vha, 0x1161,
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x. Scheduling ISP abort ",
+				    command, mcp->mb[0]);
+
+				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				if (ha->isp_ops->abort_isp(vha)) {
+					/* Failed. retry later. */
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+				}
+				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+				ql_dbg(ql_dbg_mbx, vha, 0x1162,
+				    "Finished abort_isp.\n");
+			}
+		}
+	}
+
+premature_exit:
+	/* Allow next mbx cmd to come in. */
+	complete(&ha->mbx_cmd_comp);
+
+	if (rval) {
+		ql_log(ql_log_warn, base_vha, 0x1163,
+		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
+		    "mb[3]=%x, cmd=%x ****.\n",
+		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+	} else {
+		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
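+
+/*
+ * Serialization note for qlafx00_mailbox_command(): only one mailbox
+ * command may be outstanding. Entry waits on ha->mbx_cmd_comp, which is
+ * completed again at premature_exit (the common exit path), and the
+ * reply is collected either via the mailbox interrupt or, when
+ * interrupts cannot be relied on, by polling the response queue until
+ * mcp->tov seconds have elapsed.
+ */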
+
+/*
+ * qlafx00_driver_shutdown
+ *	Indicate a driver shutdown to firmware.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *
+ * Returns:
+ *	local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_0;
+	if (tmo)
+		mcp->tov = tmo;
+	else
+		mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qlafx00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1167,
+		    "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qlafx00_get_firmware_state
+ *	Get adapter firmware state.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qlafx00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qlafx00_mailbox_command(vha, mcp);
+
+	/* Return firmware states. */
+	states[0] = mcp->mb[1];
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x116a,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/*
+ * qlafx00_init_firmware
+ *	Initialize adapter firmware.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	dptr = Initialization control block pointer.
+ *	size = size of initialization control block.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qlafx00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+	mcp->mb[1] = 0;
+	mcp->mb[2] = MSD(ha->init_cb_dma);
+	mcp->mb[3] = LSD(ha->init_cb_dma);
+
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->buf_size = size;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qlafx00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x116d,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/*
+ * qlafx00_mbx_reg_test
+ */
+static int
+qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+	mcp->mb[1] = 0xAAAA;
+	mcp->mb[2] = 0x5555;
+	mcp->mb[3] = 0xAA55;
+	mcp->mb[4] = 0x55AA;
+	mcp->mb[5] = 0xA5A5;
+	mcp->mb[6] = 0x5A5A;
+	mcp->mb[7] = 0x2525;
+	mcp->mb[8] = 0xBBBB;
+	mcp->mb[9] = 0x6666;
+	mcp->mb[10] = 0xBB66;
+	mcp->mb[11] = 0x66BB;
+	mcp->mb[12] = 0xB6B6;
+	mcp->mb[13] = 0x6B6B;
+	mcp->mb[14] = 0x3636;
+	mcp->mb[15] = 0xCCCC;
+
+	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->buf_size = 0;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qlafx00_mailbox_command(vha, mcp);
+	if (rval == QLA_SUCCESS) {
+		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
+		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
+		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
+		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
+		    mcp->mb[31] != 0xCCCC)
+			rval = QLA_FUNCTION_FAILED;
+	}
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1170,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/**
+ * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_pci_config(scsi_qla_host_t *vha)
+{
+	uint16_t w;
+	struct qla_hw_data *ha = vha->hw;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	/* PCIe -- adjust Maximum Read Request Size (2048). */
+	if (pci_is_pcie(ha->pdev))
+		pcie_set_readrq(ha->pdev, 2048);
+
+	ha->chip_revision = ha->pdev->revision;
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qlafx00_soc_cpu_reset() - Perform a warm reset of the SoC (the CPUs on
+ * the SoC are reset).
+ * @vha: HA context
+ */
+static inline void
+qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	int i, core;
+	uint32_t cnt;
+	uint32_t reg_val;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
+
+	/* stop the XOR DMA engines */
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
+
+	/* stop the IDMA engines */
+	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
+	reg_val &= ~(1<<12);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
+
+	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
+	reg_val &= ~(1<<12);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
+
+	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
+	reg_val &= ~(1<<12);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
+
+	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
+	reg_val &= ~(1<<12);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
+
+	for (i = 0; i < 100000; i++) {
+		if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
+		    (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
+			break;
+		udelay(100);
+	}
+
+	/* Set all 4 cores in reset */
+	for (i = 0; i < 4; i++) {
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
+	}
+
+	/* Reset all units in Fabric */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
+
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
+	QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
+
+	/* Set all 4 core Memory Power Down Registers */
+	for (i = 0; i < 5; i++) {
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
+	}
+
+	/* Reset all interrupt control registers */
+	for (i = 0; i < 115; i++) {
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
+	}
+
+	/* Reset Timers control registers. per core */
+	for (core = 0; core < 4; core++)
+		for (i = 0; i < 8; i++)
+			QLAFX00_SET_HBA_SOC_REG(ha,
+			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
+
+	/* Reset per core IRQ ack register */
+	for (core = 0; core < 4; core++)
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
+
+	/* Set Fabric control and config to defaults */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
+
+	/* Kick in Fabric units */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
+
+	/* Kick in Core0 to start boot process */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Wait 10 seconds for the soft-reset to complete. */
+	for (cnt = 10; cnt; cnt--) {
+		msleep(1000);
+		barrier();
+	}
+}
+
+/**
+ * qlafx00_soft_reset() - Soft Reset ISPFx00.
+ * @vha: HA context
+ */
+void
+qlafx00_soft_reset(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (unlikely(pci_channel_offline(ha->pdev) &&
+	    ha->flags.pci_channel_io_perm_failure))
+		return;
+
+	ha->isp_ops->disable_intrs(ha);
+	qlafx00_soc_cpu_reset(vha);
+}
+
+/**
+ * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_chip_diag(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+	rval = qlafx00_mbx_reg_test(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x1165,
+		    "Failed mailbox send register test\n");
+	} else {
+		/* Flag a successful rval */
+		rval = QLA_SUCCESS;
+	}
+	return rval;
+}
+
+void
+qlafx00_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+	WRT_REG_DWORD(&reg->req_q_in, 0);
+	WRT_REG_DWORD(&reg->req_q_out, 0);
+
+	WRT_REG_DWORD(&reg->rsp_q_in, 0);
+	WRT_REG_DWORD(&reg->rsp_q_out, 0);
+
+	/* PCI posting */
+	RD_REG_DWORD(&reg->rsp_q_out);
+}
+
+char *
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (pci_is_pcie(ha->pdev)) {
+		strcpy(str, "PCIe iSA");
+		return str;
+	}
+	return str;
+}
+
+char *
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	snprintf(str, size, "%s", ha->mr.fw_version);
+	return str;
+}
+
+void
+qlafx00_enable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 1;
+	QLAFX00_ENABLE_ICNTRL_REG(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlafx00_disable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 0;
+	QLAFX00_DISABLE_ICNTRL_REG(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int
+qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
+{
+	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+}
+
+int
+qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
+{
+	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+}
+
+int
+qlafx00_loop_reset(scsi_qla_host_t *vha)
+{
+	int ret;
+	struct fc_port *fcport;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ql2xtargetreset) {
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->port_type != FCT_TARGET)
+				continue;
+
+			ret = ha->isp_ops->target_reset(fcport, 0, 0);
+			if (ret != QLA_SUCCESS) {
+				ql_dbg(ql_dbg_taskm, vha, 0x803d,
+				    "Bus Reset failed: Reset=%d "
+				    "d_id=%x.\n", ret, fcport->d_id.b24);
+			}
+		}
+	}
+	return QLA_SUCCESS;
+}
+
+int
+qlafx00_iospace_config(struct qla_hw_data *ha)
+{
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
+		    "Invalid pci I/O region size (%s).\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
+		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
+			pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->cregbase =
+	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+	if (!ha->cregbase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
+		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
+		    "region #2 not an MMIO resource (%s), aborting\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
+		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
+			pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->iobase =
+	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+	if (!ha->iobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
+		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Determine queue resources */
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+
+	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
+	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
+	    ha->bars, ha->cregbase, ha->iobase);
+
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
+static void
+qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	req->length_fx00 = req->length;
+	req->ring_fx00 = req->ring;
+	req->dma_fx00 = req->dma;
+
+	rsp->length_fx00 = rsp->length;
+	rsp->ring_fx00 = rsp->ring;
+	rsp->dma_fx00 = rsp->dma;
+
+	ql_dbg(ql_dbg_init, vha, 0x012d,
+	    "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
+	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
+	    req->length_fx00, (u64)req->dma_fx00);
+
+	ql_dbg(ql_dbg_init, vha, 0x012e,
+	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
+	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
+	    rsp->length_fx00, (u64)rsp->dma_fx00);
+}
+
+static int
+qlafx00_config_queues(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
+
+	req->length = ha->req_que_len;
+	req->ring = (void __force *)ha->iobase + ha->req_que_off;
+	req->dma = bar2_hdl + ha->req_que_off;
+	if ((!req->ring) || (req->length == 0)) {
+		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
+		    "Unable to allocate memory for req_ring\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x0130,
+	    "req: %p req_ring pointer %p req len 0x%x "
+	    "req off 0x%x, req->dma: 0x%llx\n",
+	    req, req->ring, req->length,
+	    ha->req_que_off, (u64)req->dma);
+
+	rsp->length = ha->rsp_que_len;
+	rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
+	rsp->dma = bar2_hdl + ha->rsp_que_off;
+	if ((!rsp->ring) || (rsp->length == 0)) {
+		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
+		    "Unable to allocate memory for rsp_ring\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x0132,
+	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
+	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
+	    rsp, rsp->ring, rsp->length,
+	    ha->rsp_que_off, (u64)rsp->dma);
+
+	return QLA_SUCCESS;
+}
+
+static int
+qlafx00_init_fw_ready(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	unsigned long wtime;
+	uint16_t wait_time;	/* Wait time */
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t aenmbx, aenmbx7 = 0;
+	uint32_t pseudo_aen;
+	uint32_t state[5];
+	bool done = false;
+
+	/* 30 seconds wait - Adjust if required */
+	wait_time = 30;
+
+	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+	if (pseudo_aen == 1) {
+		aenmbx7 = RD_REG_DWORD(&reg->initval7);
+		ha->mbx_intr_code = MSW(aenmbx7);
+		ha->rqstq_intr_code = LSW(aenmbx7);
+		rval = qlafx00_driver_shutdown(vha, 10);
+		if (rval != QLA_SUCCESS)
+			qlafx00_soft_reset(vha);
+	}
+
+	/* wait time before firmware ready */
+	wtime = jiffies + (wait_time * HZ);
+	do {
+		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+		barrier();
+		ql_dbg(ql_dbg_mbx, vha, 0x0133,
+		    "aenmbx: 0x%x\n", aenmbx);
+
+		switch (aenmbx) {
+		case MBA_FW_NOT_STARTED:
+		case MBA_FW_STARTING:
+			break;
+
+		case MBA_SYSTEM_ERR:
+		case MBA_REQ_TRANSFER_ERR:
+		case MBA_RSP_TRANSFER_ERR:
+		case MBA_FW_INIT_FAILURE:
+			qlafx00_soft_reset(vha);
+			break;
+
+		case MBA_FW_RESTART_CMPLT:
+			/* Set the mbx and rqstq intr code */
+			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+			ha->mbx_intr_code = MSW(aenmbx7);
+			ha->rqstq_intr_code = LSW(aenmbx7);
+			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+			WRT_REG_DWORD(&reg->aenmailbox0, 0);
+			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+			ql_dbg(ql_dbg_init, vha, 0x0134,
+			    "f/w returned mbx_intr_code: 0x%x, "
+			    "rqstq_intr_code: 0x%x\n",
+			    ha->mbx_intr_code, ha->rqstq_intr_code);
+			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+			rval = QLA_SUCCESS;
+			done = true;
+			break;
+
+		default:
+			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
+				break;
+
+			/* The fw is apparently not ready. In order to
+			 * continue, we might need to issue a mbox cmd, but
+			 * the problem is that the DoorBell vector values
+			 * that came with the 8060 AEN are most likely gone
+			 * by now (and thus no bell would be rung on the fw
+			 * side when a mbox cmd is issued). We therefore
+			 * have to grab the 8060 AEN shadow regs (filled in
+			 * by the FW when the last 8060 AEN was posted).
+			 * Do the following to determine what is needed to
+			 * get the FW ready:
+			 * 1. reload the 8060 AEN values from the shadow regs
+			 * 2. clear int status to get rid of possible pending
+			 *    interrupts
+			 * 3. issue the Get FW State mbox cmd to determine
+			 *    the fw state, setting the mbx and rqstq intr
+			 *    code from the shadow regs
+			 */
+			aenmbx7 = RD_REG_DWORD(&reg->initval7);
+			ha->mbx_intr_code = MSW(aenmbx7);
+			ha->rqstq_intr_code = LSW(aenmbx7);
+			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
+			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
+			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
+			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+			ql_dbg(ql_dbg_init, vha, 0x0135,
+			    "f/w returned mbx_intr_code: 0x%x, "
+			    "rqstq_intr_code: 0x%x\n",
+			    ha->mbx_intr_code, ha->rqstq_intr_code);
+			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+			/* Get the FW state */
+			rval = qlafx00_get_firmware_state(vha, state);
+			if (rval != QLA_SUCCESS) {
+				/* Retry if timer has not expired */
+				break;
+			}
+
+			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
+				/* Firmware is waiting to be
+				 * initialized by driver
+				 */
+				rval = QLA_SUCCESS;
+				done = true;
+				break;
+			}
+
+			/* Issue driver shutdown and wait until f/w recovers.
+			 * Driver should continue to poll until 8060 AEN is
+			 * received indicating firmware recovery.
+			 */
+			ql_dbg(ql_dbg_init, vha, 0x0136,
+			    "Sending Driver shutdown fw_state 0x%x\n",
+			    state[0]);
+
+			rval = qlafx00_driver_shutdown(vha, 10);
+			if (rval != QLA_SUCCESS) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+			msleep(500);
+
+			wtime = jiffies + (wait_time * HZ);
+			break;
+		}
+
+		if (!done) {
+			if (time_after_eq(jiffies, wtime)) {
+				ql_dbg(ql_dbg_init, vha, 0x0137,
+				    "Init f/w failed: aen[7]: 0x%x\n",
+				    RD_REG_DWORD(&reg->aenmailbox7));
+				rval = QLA_FUNCTION_FAILED;
+				done = true;
+				break;
+			}
+			/* Delay for a while */
+			msleep(500);
+		}
+	} while (!done);
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x0138,
+		    "%s **** FAILED ****.\n", __func__);
+	else
+		ql_dbg(ql_dbg_init, vha, 0x0139,
+		    "%s **** SUCCESS ****.\n", __func__);
+
+	return rval;
+}
+
+/*
+ * qlafx00_fw_ready() - Waits for firmware ready.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_fw_ready(scsi_qla_host_t *vha)
+{
+	int		rval;
+	unsigned long	wtime;
+	uint16_t	wait_time;	/* Wait time if loop is coming ready */
+	uint32_t	state[5];
+
+	rval = QLA_SUCCESS;
+
+	wait_time = 10;
+
+	/* Set the deadline for the firmware-ready wait */
+	wtime = jiffies + (wait_time * HZ);
+
+	/* Wait for ISP to finish init */
+	if (!vha->flags.init_done)
+		ql_dbg(ql_dbg_init, vha, 0x013a,
+		    "Waiting for init to complete...\n");
+
+	do {
+		rval = qlafx00_get_firmware_state(vha, state);
+
+		if (rval == QLA_SUCCESS) {
+			if (state[0] == FSTATE_FX00_INITIALIZED) {
+				ql_dbg(ql_dbg_init, vha, 0x013b,
+				    "fw_state=%x\n", state[0]);
+				rval = QLA_SUCCESS;
+				break;
+			}
+		}
+		rval = QLA_FUNCTION_FAILED;
+
+		if (time_after_eq(jiffies, wtime))
+			break;
+
+		/* Delay for a while */
+		msleep(500);
+
+		ql_dbg(ql_dbg_init, vha, 0x013c,
+		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
+	} while (1);
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x013d,
+		    "Firmware ready **** FAILED ****.\n");
+	else
+		ql_dbg(ql_dbg_init, vha, 0x013e,
+		    "Firmware ready **** SUCCESS ****.\n");
+
+	return rval;
+}
+
+static int
+qlafx00_find_all_targets(scsi_qla_host_t *vha,
+	struct list_head *new_fcports)
+{
+	int		rval;
+	uint16_t	tgt_id;
+	fc_port_t	*fcport, *new_fcport;
+	int		found;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+
+	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+		return QLA_FUNCTION_FAILED;
+
+	if ((atomic_read(&vha->loop_down_timer) ||
+	     STATE_TRANSITION(vha))) {
+		atomic_set(&vha->loop_down_timer, 0);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
+	    "Listing Target bit map...\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
+	    0x2089, (uint8_t *)ha->gid_list, 32);
+
+	/* Allocate temporary rmtport for any new rmtports discovered. */
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (new_fcport == NULL)
+		return QLA_MEMORY_ALLOC_FAILED;
+
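+	/* Each set bit in the target node bitmap is a firmware-reported tgt_id. */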
+	for_each_set_bit(tgt_id, (void *)ha->gid_list,
+	    QLAFX00_TGT_NODE_LIST_SIZE) {
+
+		/* Send get target node info */
+		new_fcport->tgt_id = tgt_id;
+		rval = qlafx00_fx_disc(vha, new_fcport,
+		    FXDISC_GET_TGT_NODE_INFO);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x208a,
+			    "Target info scan failed -- assuming zero-entry "
+			    "result...\n");
+			continue;
+		}
+
+		/* Locate matching device in database. */
+		found = 0;
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (memcmp(new_fcport->port_name,
+			    fcport->port_name, WWN_SIZE))
+				continue;
+
+			found++;
+
+			/*
+			 * If tgt_id is same and state FCS_ONLINE, nothing
+			 * changed.
+			 */
+			if (fcport->tgt_id == new_fcport->tgt_id &&
+			    atomic_read(&fcport->state) == FCS_ONLINE)
+				break;
+
+			/*
+			 * Tgt ID changed or device was marked to be updated.
+			 */
+			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
+			    "TGT-ID Change(%s): Present tgt id: "
+			    "0x%x state: 0x%x "
+			    "wwnn = %llx wwpn = %llx.\n",
+			    __func__, fcport->tgt_id,
+			    atomic_read(&fcport->state),
+			    (unsigned long long)wwn_to_u64(fcport->node_name),
+			    (unsigned long long)wwn_to_u64(fcport->port_name));
+
+			ql_log(ql_log_info, vha, 0x208c,
+			    "TGT-ID Announce(%s): Discovered tgt "
+			    "id 0x%x wwnn = %llx "
+			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
+			    (unsigned long long)
+			    wwn_to_u64(new_fcport->node_name),
+			    (unsigned long long)
+			    wwn_to_u64(new_fcport->port_name));
+
+			if (atomic_read(&fcport->state) != FCS_ONLINE) {
+				fcport->old_tgt_id = fcport->tgt_id;
+				fcport->tgt_id = new_fcport->tgt_id;
+				ql_log(ql_log_info, vha, 0x208d,
+				   "TGT-ID: New fcport Added: %p\n", fcport);
+				qla2x00_update_fcport(vha, fcport);
+			} else {
+				ql_log(ql_log_info, vha, 0x208e,
+				    "Existing TGT-ID %x did not get "
+				    "offline event from firmware.\n",
+				    fcport->old_tgt_id);
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				kfree(new_fcport);
+				return rval;
+			}
+			break;
+		}
+
+		if (found)
+			continue;
+
+		/* If device was not in our fcports list, then add it. */
+		list_add_tail(&new_fcport->list, new_fcports);
+
+		/* Allocate a new replacement fcport. */
+		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (new_fcport == NULL)
+			return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	kfree(new_fcport);
+	return rval;
+}
+
+/*
+ * qlafx00_configure_all_targets
+ *      Set up target devices with node IDs.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      BIT_0 = error
+ */
+static int
+qlafx00_configure_all_targets(scsi_qla_host_t *vha)
+{
+	int rval;
+	fc_port_t *fcport, *rmptemp;
+	LIST_HEAD(new_fcports);
+
+	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+	    FXDISC_GET_TGT_NODE_LIST);
+	if (rval != QLA_SUCCESS) {
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return rval;
+	}
+
+	rval = qlafx00_find_all_targets(vha, &new_fcports);
+	if (rval != QLA_SUCCESS) {
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return rval;
+	}
+
+	/*
+	 * Delete all previous devices marked lost.
+	 */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
+
+		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+			if (fcport->port_type != FCT_INITIATOR)
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+		}
+	}
+
+	/*
+	 * Add the new devices to our devices list.
+	 */
+	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
+
+		qla2x00_update_fcport(vha, fcport);
+		list_move_tail(&fcport->list, &vha->vp_fcports);
+		ql_log(ql_log_info, vha, 0x208f,
+		    "Attach new target id 0x%x wwnn = %llx "
+		    "wwpn = %llx.\n",
+		    fcport->tgt_id,
+		    (unsigned long long)wwn_to_u64(fcport->node_name),
+		    (unsigned long long)wwn_to_u64(fcport->port_name));
+	}
+
+	/* Free all new device structures not processed. */
+	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+		list_del(&fcport->list);
+		kfree(fcport);
+	}
+
+	return rval;
+}
+
+/*
+ * qlafx00_configure_devices
+ *      Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ *      ha                = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      1 = error.
+ *      2 = database was full and device was not configured.
+ */
+int
+qlafx00_configure_devices(scsi_qla_host_t *vha)
+{
+	int rval;
+	unsigned long flags;
+
+	rval = QLA_SUCCESS;
+
+	flags = vha->dpc_flags;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2090,
+	    "Configure devices -- dpc flags = 0x%lx\n", flags);
+
+	rval = qlafx00_configure_all_targets(vha);
+
+	if (rval == QLA_SUCCESS) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			atomic_set(&vha->loop_state, LOOP_READY);
+			ql_log(ql_log_info, vha, 0x2091,
+			    "Device Ready\n");
+		}
+	}
+
+	if (rval) {
+		ql_dbg(ql_dbg_disc, vha, 0x2092,
+		    "%s *** FAILED ***.\n", __func__);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2093,
+		    "%s: exiting normally.\n", __func__);
+	}
+	return rval;
+}
+
+static void
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
+{
+	struct qla_hw_data *ha = vha->hw;
+	fc_port_t *fcport;
+
+	vha->flags.online = 0;
+	ha->mr.fw_hbt_en = 0;
+
+	if (!critemp) {
+		ha->flags.chip_reset_done = 0;
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		vha->qla_stats.total_isp_aborts++;
+		ql_log(ql_log_info, vha, 0x013f,
+		    "Performing ISP error recovery - ha = %p.\n", ha);
+		ha->isp_ops->reset_chip(vha);
+	}
+
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		atomic_set(&vha->loop_down_timer,
+		    QLAFX00_LOOP_DOWN_TIME);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer,
+			    QLAFX00_LOOP_DOWN_TIME);
+	}
+
+	/* Clear all async request states across all VPs. */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		fcport->flags = 0;
+		if (atomic_read(&fcport->state) == FCS_ONLINE)
+			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+	}
+
+	if (!ha->flags.eeh_busy) {
+		if (critemp) {
+			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+		} else {
+			/* Requeue all commands in outstanding command list. */
+			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		}
+	}
+
+	qla2x00_free_irqs(vha);
+	if (critemp)
+		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
+	else
+		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+
+	/* Clear the Interrupts */
+	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+	ql_log(ql_log_info, vha, 0x0140,
+	    "%s Done - ha=%p.\n", __func__, ha);
+}
+
+/**
+ * qlafx00_init_response_q_entries() - Initializes response queue entries.
+ * @rsp: response queue
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ */
+void
+qlafx00_init_response_q_entries(struct rsp_que *rsp)
+{
+	uint16_t cnt;
+	response_t *pkt;
+
+	rsp->ring_ptr = rsp->ring;
+	rsp->ring_index    = 0;
+	rsp->status_srb = NULL;
+	pkt = rsp->ring_ptr;
+	for (cnt = 0; cnt < rsp->length; cnt++) {
+		pkt->signature = RESPONSE_PROCESSED;
+		WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
+		    RESPONSE_PROCESSED);
+		pkt++;
+	}
+}
+
+int
+qlafx00_rescan_isp(scsi_qla_host_t *vha)
+{
+	uint32_t status = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t aenmbx7;
+
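+	/*
+	 * Re-register interrupts, then pick up the queue layout the
+	 * restarted firmware advertised in the AEN mailboxes.
+	 */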
+	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
+
+	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+	ha->mbx_intr_code = MSW(aenmbx7);
+	ha->rqstq_intr_code = LSW(aenmbx7);
+	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+
+	ql_dbg(ql_dbg_disc, vha, 0x2094,
+	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
+	    "Req que offset 0x%x Rsp que offset 0x%x\n",
+	    ha->mbx_intr_code, ha->rqstq_intr_code,
+	    ha->req_que_off, ha->rsp_que_off);
+
+	/* Clear the Interrupts */
+	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+	status = qla2x00_init_rings(vha);
+	if (!status) {
+		vha->flags.online = 1;
+
+		/* if no cable then assume it's good */
+		if ((vha->device_flags & DFLG_NO_CABLE))
+			status = 0;
+		/* Register system information */
+		if (qlafx00_fx_disc(vha,
+		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
+			ql_dbg(ql_dbg_disc, vha, 0x2095,
+			    "failed to register host info\n");
+	}
+	scsi_unblock_requests(vha->host);
+	return status;
+}
+
+void
+qlafx00_timer_routine(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t fw_heart_beat;
+	uint32_t aenmbx0;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t tempc;
+
+	/* Check firmware health */
+	if (ha->mr.fw_hbt_cnt)
+		ha->mr.fw_hbt_cnt--;
+	else {
+		if ((!ha->flags.mr_reset_hdlr_active) &&
+		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
+		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+		    (ha->mr.fw_hbt_en)) {
+			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
+				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
+				ha->mr.fw_hbt_miss_cnt = 0;
+			} else {
+				ha->mr.fw_hbt_miss_cnt++;
+				if (ha->mr.fw_hbt_miss_cnt ==
+				    QLAFX00_HEARTBEAT_MISS_CNT) {
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+					qla2xxx_wake_dpc(vha);
+					ha->mr.fw_hbt_miss_cnt = 0;
+				}
+			}
+		}
+		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+	}
+
+	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
+		/* Reset recovery to be performed in timer routine */
+		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+		if (ha->mr.fw_reset_timer_exp) {
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			ha->mr.fw_reset_timer_exp = 0;
+		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
+			/* Wake up DPC to rescan the targets */
+			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
+			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		} else if ((aenmbx0 == MBA_FW_STARTING) &&
+		    (!ha->mr.fw_hbt_en)) {
+			ha->mr.fw_hbt_en = 1;
+		} else if (!ha->mr.fw_reset_timer_tick) {
+			if (aenmbx0 == ha->mr.old_aenmbx0_state)
+				ha->mr.fw_reset_timer_exp = 1;
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		} else if (aenmbx0 == 0xFFFFFFFF) {
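+			/*
+			 * Register read returned all ones - reprogram the
+			 * PEX0 window register from the BAR1 base address.
+			 */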
+			uint32_t data0, data1;
+
+			data0 = QLAFX00_RD_REG(ha,
+			    QLAFX00_BAR1_BASE_ADDR_REG);
+			data1 = QLAFX00_RD_REG(ha,
+			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
+
+			data0 &= 0xffff0000;
+			data1 &= 0x0000ffff;
+
+			QLAFX00_WR_REG(ha,
+			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
+			    (data0 | data1));
+		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
+			ha->mr.fw_reset_timer_tick =
+			    QLAFX00_MAX_RESET_INTERVAL;
+		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
+			ha->mr.fw_reset_timer_tick =
+			    QLAFX00_MAX_RESET_INTERVAL;
+		}
+		if (ha->mr.old_aenmbx0_state != aenmbx0) {
+			ha->mr.old_aenmbx0_state = aenmbx0;
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		}
+		ha->mr.fw_reset_timer_tick--;
+	}
+	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
+		/*
+		 * Critical temperature recovery to be
+		 * performed in timer routine
+		 */
+		if (ha->mr.fw_critemp_timer_tick == 0) {
+			tempc = QLAFX00_GET_TEMPERATURE(ha);
+			ql_dbg(ql_dbg_timer, vha, 0x6012,
+			    "ISPFx00(%s): Critical temp timer, "
+			    "current SOC temperature: %d\n",
+			    __func__, tempc);
+			if (tempc < ha->mr.critical_temperature) {
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				clear_bit(FX00_CRITEMP_RECOVERY,
+				    &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+			ha->mr.fw_critemp_timer_tick =
+			    QLAFX00_CRITEMP_INTERVAL;
+		} else {
+			ha->mr.fw_critemp_timer_tick--;
+		}
+	}
+	if (ha->mr.host_info_resend) {
+		/*
+		 * Incomplete host info might be sent to firmware
+		 * during system boot - info should be resent
+		 */
+		if (ha->mr.hinfo_resend_timer_tick == 0) {
+			ha->mr.host_info_resend = false;
+			set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
+			ha->mr.hinfo_resend_timer_tick =
+			    QLAFX00_HINFO_RESEND_INTERVAL;
+			qla2xxx_wake_dpc(vha);
+		} else {
+			ha->mr.hinfo_resend_timer_tick--;
+		}
+	}
+
+}
+
+/*
+ *  qlafx00_reset_initialize
+ *      Re-initialize after an iSA device reset.
+ *
+ * Input:
+ *      ha  = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_reset_initialize(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_dbg(ql_dbg_init, vha, 0x0142,
+		    "Device in failed state\n");
+		return QLA_SUCCESS;
+	}
+
+	ha->flags.mr_reset_hdlr_active = 1;
+
+	if (vha->flags.online) {
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha, false);
+	}
+
+	ql_log(ql_log_info, vha, 0x0143,
+	    "(%s): succeeded.\n", __func__);
+	ha->flags.mr_reset_hdlr_active = 0;
+	return QLA_SUCCESS;
+}
+
+/*
+ *  qlafx00_abort_isp
+ *      Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ *      ha  = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_abort_isp(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.online) {
+		if (unlikely(pci_channel_offline(ha->pdev) &&
+		    ha->flags.pci_channel_io_perm_failure)) {
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+			return QLA_SUCCESS;
+		}
+
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha, false);
+	} else {
+		scsi_block_requests(vha->host);
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		vha->qla_stats.total_isp_aborts++;
+		ha->isp_ops->reset_chip(vha);
+		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+		/* Clear the Interrupts */
+		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+	}
+
+	ql_log(ql_log_info, vha, 0x0145,
+	    "(%s): succeeded.\n", __func__);
+
+	return QLA_SUCCESS;
+}
+
+static inline fc_port_t*
+qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
+{
+	fc_port_t	*fcport;
+
+	/* Check for matching device in remote port list. */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->tgt_id == tgt_id) {
+			ql_dbg(ql_dbg_async, vha, 0x5072,
+			    "Matching fcport(%p) found with TGT-ID: 0x%x "
+			    "and Remote TGT_ID: 0x%x\n",
+			    fcport, fcport->tgt_id, tgt_id);
+			return fcport;
+		}
+	}
+	return NULL;
+}
+
+static void
+qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
+{
+	fc_port_t	*fcport;
+
+	ql_log(ql_log_info, vha, 0x5073,
+	    "Detach TGT-ID: 0x%x\n", tgt_id);
+
+	fcport = qlafx00_get_fcport(vha, tgt_id);
+	if (!fcport)
+		return;
+
+	qla2x00_mark_device_lost(vha, fcport, 0, 0);
+
+	return;
+}
+
+int
+qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
+{
+	int rval = 0;
+	uint32_t aen_code, aen_data;
+
+	aen_code = FCH_EVT_VENDOR_UNIQUE;
+	aen_data = evt->u.aenfx.evtcode;
+
+	switch (evt->u.aenfx.evtcode) {
+	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
+		if (evt->u.aenfx.mbx[1] == 0) {
+			if (evt->u.aenfx.mbx[2] == 1) {
+				if (!vha->flags.fw_tgt_reported)
+					vha->flags.fw_tgt_reported = 1;
+				atomic_set(&vha->loop_down_timer, 0);
+				atomic_set(&vha->loop_state, LOOP_UP);
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			} else if (evt->u.aenfx.mbx[2] == 2) {
+				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
+			}
+		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
+			if (evt->u.aenfx.mbx[2] == 1) {
+				if (!vha->flags.fw_tgt_reported)
+					vha->flags.fw_tgt_reported = 1;
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			} else if (evt->u.aenfx.mbx[2] == 2) {
+				vha->device_flags |= DFLG_NO_CABLE;
+				qla2x00_mark_all_devices_lost(vha, 1);
+			}
+		}
+		break;
+	case QLAFX00_MBA_LINK_UP:
+		aen_code = FCH_EVT_LINKUP;
+		aen_data = 0;
+		break;
+	case QLAFX00_MBA_LINK_DOWN:
+		aen_code = FCH_EVT_LINKDOWN;
+		aen_data = 0;
+		break;
+	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
+		ql_log(ql_log_info, vha, 0x5082,
+		    "Process critical temperature event "
+		    "aenmb[0]: %x\n",
+		    evt->u.aenfx.evtcode);
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha, true);
+		scsi_unblock_requests(vha->host);
+		break;
+	}
+
+	fc_host_post_event(vha->host, fc_get_event_number(),
+	    aen_code, aen_data);
+
+	return rval;
+}
+
+static void
+qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
+{
+	u64 port_name = 0, node_name = 0;
+
+	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
+	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
+
+	fc_host_node_name(vha->host) = node_name;
+	fc_host_port_name(vha->host) = port_name;
+	if (!pinfo->port_type)
+		vha->hw->current_topology = ISP_CFG_F;
+	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
+		atomic_set(&vha->loop_state, LOOP_READY);
+	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
+}
+
+static void
+qla2x00_fxdisc_iocb_timeout(void *data)
+{
+	srb_t *sp = data;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+static void
+qla2x00_fxdisc_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+int
+qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
+{
+	srb_t *sp;
+	struct srb_iocb *fdisc;
+	int rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	struct host_system_info *phost_info;
+	struct register_host_info *preg_hsi;
+	struct new_utsname *p_sysid = NULL;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	sp->type = SRB_FXIOCB_DCMD;
+	sp->name = "fxdisc";
+	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+
+	fdisc = &sp->u.iocb_cmd;
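+	/* Set up DMA buffer requirements and request payload per sub-command. */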
+	switch (fx_type) {
+	case FXDISC_GET_CONFIG_INFO:
+		fdisc->u.fxiocb.flags = SRB_FXDISC_RESP_DMA_VALID;
+		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
+		break;
+	case FXDISC_GET_PORT_INFO:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
+		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
+		break;
+	case FXDISC_GET_TGT_NODE_INFO:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
+		fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
+		break;
+	case FXDISC_GET_TGT_NODE_LIST:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
+		break;
+	case FXDISC_REG_HOST_INFO:
+		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
+		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
+		p_sysid = utsname();
+		if (!p_sysid) {
+			ql_log(ql_log_warn, vha, 0x303c,
+			    "Not able to get the system information\n");
+			goto done_free_sp;
+		}
+		break;
+	case FXDISC_ABORT_IOCTL:
+	default:
+		break;
+	}
+
+	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
+		    fdisc->u.fxiocb.req_len,
+		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
+		if (!fdisc->u.fxiocb.req_addr)
+			goto done_free_sp;
+
+		if (fx_type == FXDISC_REG_HOST_INFO) {
+			preg_hsi = (struct register_host_info *)
+				fdisc->u.fxiocb.req_addr;
+			phost_info = &preg_hsi->hsi;
+			memset(preg_hsi, 0, sizeof(struct register_host_info));
+			phost_info->os_type = OS_TYPE_LINUX;
+			strncpy(phost_info->sysname,
+			    p_sysid->sysname, SYSNAME_LENGTH);
+			strncpy(phost_info->nodename,
+			    p_sysid->nodename, NODENAME_LENGTH);
+			if (!strcmp(phost_info->nodename, "(none)"))
+				ha->mr.host_info_resend = true;
+			strncpy(phost_info->release,
+			    p_sysid->release, RELEASE_LENGTH);
+			strncpy(phost_info->version,
+			    p_sysid->version, VERSION_LENGTH);
+			strncpy(phost_info->machine,
+			    p_sysid->machine, MACHINE_LENGTH);
+			strncpy(phost_info->domainname,
+			    p_sysid->domainname, DOMNAME_LENGTH);
+			strncpy(phost_info->hostdriver,
+			    QLA2XXX_VERSION, VERSION_LENGTH);
+			preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
+			ql_dbg(ql_dbg_init, vha, 0x0149,
+			    "ISP%04X: Host registration with firmware\n",
+			    ha->pdev->device);
+			ql_dbg(ql_dbg_init, vha, 0x014a,
+			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
+			    phost_info->os_type,
+			    phost_info->sysname,
+			    phost_info->nodename);
+			ql_dbg(ql_dbg_init, vha, 0x014b,
+			    "release = '%s', version = '%s'\n",
+			    phost_info->release,
+			    phost_info->version);
+			ql_dbg(ql_dbg_init, vha, 0x014c,
+			    "machine = '%s' "
+			    "domainname = '%s', hostdriver = '%s'\n",
+			    phost_info->machine,
+			    phost_info->domainname,
+			    phost_info->hostdriver);
+			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
+			    (uint8_t *)phost_info,
+			    sizeof(struct host_system_info));
+		}
+	}
+
+	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
+		    fdisc->u.fxiocb.rsp_len,
+		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
+		if (!fdisc->u.fxiocb.rsp_addr)
+			goto done_unmap_req;
+	}
+
+	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+	fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
+	sp->done = qla2x00_fxdisc_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_unmap_dma;
+
+	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
+
+	if (fx_type == FXDISC_GET_CONFIG_INFO) {
+		struct config_info_data *pinfo =
+		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
+		strcpy(vha->hw->model_number, pinfo->model_num);
+		strcpy(vha->hw->model_desc, pinfo->model_description);
+		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
+		    sizeof(vha->hw->mr.symbolic_name));
+		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
+		    sizeof(vha->hw->mr.serial_num));
+		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
+		    sizeof(vha->hw->mr.hw_version));
+		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
+		    sizeof(vha->hw->mr.fw_version));
+		strim(vha->hw->mr.fw_version);
+		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
+		    sizeof(vha->hw->mr.uboot_version));
+		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
+		    sizeof(vha->hw->mr.fru_serial_num));
+		vha->hw->mr.critical_temperature =
+		    (pinfo->nominal_temp_value) ?
+		    pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
+		ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
+		    QLAFX00_EXTENDED_IO_EN_MASK) != 0;
+	} else if (fx_type == FXDISC_GET_PORT_INFO) {
+		struct port_info_data *pinfo =
+		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
+		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
+		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
+		vha->d_id.b.domain = pinfo->port_id[0];
+		vha->d_id.b.area = pinfo->port_id[1];
+		vha->d_id.b.al_pa = pinfo->port_id[2];
+		qlafx00_update_host_attr(vha, pinfo);
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
+		    (uint8_t *)pinfo, 16);
+	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
+		struct qlafx00_tgt_node_info *pinfo =
+		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
+		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
+		fcport->port_type = FCT_TARGET;
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
+		    (uint8_t *)pinfo, 16);
+	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
+		struct qlafx00_tgt_node_info *pinfo =
+		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
+		    (uint8_t *)pinfo, 16);
+		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
+	} else if (fx_type == FXDISC_ABORT_IOCTL)
+		fdisc->u.fxiocb.result =
+		    (fdisc->u.fxiocb.result ==
+			cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
+		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
+
+	rval = le32_to_cpu(fdisc->u.fxiocb.result);
+
+done_unmap_dma:
+	if (fdisc->u.fxiocb.rsp_addr)
+		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
+		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
+
+done_unmap_req:
+	if (fdisc->u.fxiocb.req_addr)
+		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
+		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
+done_free_sp:
+	sp->free(sp);
+done:
+	return rval;
+}
+
+/*
+ * qlafx00_initialize_adapter
+ *      Initialize board.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_initialize_adapter(scsi_qla_host_t *vha)
+{
+	int	rval;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t tempc;
+
+	/* Clear adapter flags. */
+	vha->flags.online = 0;
+	ha->flags.chip_reset_done = 0;
+	vha->flags.reset_active = 0;
+	ha->flags.pci_channel_io_perm_failure = 0;
+	ha->flags.eeh_busy = 0;
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->device_flags = DFLG_NO_CABLE;
+	vha->dpc_flags = 0;
+	vha->flags.management_server_logged_in = 0;
+	ha->isp_abort_cnt = 0;
+	ha->beacon_blink_led = 0;
+
+	set_bit(0, ha->req_qid_map);
+	set_bit(0, ha->rsp_qid_map);
+
+	ql_dbg(ql_dbg_init, vha, 0x0147,
+	    "Configuring PCI space...\n");
+
+	rval = ha->isp_ops->pci_config(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0148,
+		    "Unable to configure PCI space.\n");
+		return rval;
+	}
+
+	rval = qlafx00_init_fw_ready(vha);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	qlafx00_save_queue_ptrs(vha);
+
+	rval = qlafx00_config_queues(vha);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	/*
+	 * Allocate the array of outstanding commands
+	 * now that we know the firmware resources.
+	 */
+	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	rval = qla2x00_init_rings(vha);
+	ha->flags.chip_reset_done = 1;
+
+	tempc = QLAFX00_GET_TEMPERATURE(ha);
+	ql_dbg(ql_dbg_init, vha, 0x0152,
+	    "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
+	    __func__, tempc);
+
+	return rval;
+}
+
+uint32_t
+qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int rval = QLA_FUNCTION_FAILED;
+	uint32_t state[1];
+
+	if (qla2x00_reset_active(vha))
+		ql_log(ql_log_warn, vha, 0x70ce,
+		    "ISP reset active.\n");
+	else if (!vha->hw->flags.eeh_busy) {
+		rval = qlafx00_get_firmware_state(vha, state);
+	}
+	if (rval != QLA_SUCCESS)
+		memset(state, -1, sizeof(state));
+
+	return state[0];
+}
+
+void
+qlafx00_get_host_speed(struct Scsi_Host *shost)
+{
+	struct qla_hw_data *ha = ((struct scsi_qla_host *)
+					(shost_priv(shost)))->hw;
+	u32 speed = FC_PORTSPEED_UNKNOWN;
+
+	switch (ha->link_data_rate) {
+	case QLAFX00_PORT_SPEED_2G:
+		speed = FC_PORTSPEED_2GBIT;
+		break;
+	case QLAFX00_PORT_SPEED_4G:
+		speed = FC_PORTSPEED_4GBIT;
+		break;
+	case QLAFX00_PORT_SPEED_8G:
+		speed = FC_PORTSPEED_8GBIT;
+		break;
+	case QLAFX00_PORT_SPEED_10G:
+		speed = FC_PORTSPEED_10GBIT;
+		break;
+	}
+	fc_host_speed(shost) = speed;
+}
+
+/* QLAFX00 specific ISR implementation functions */
+
+static inline void
+qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+		     uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+	struct scsi_qla_host *vha = sp->vha;
+	struct scsi_cmnd *cp = GET_CMD_SP(sp);
+	uint32_t track_sense_len;
+
+	SET_FW_SENSE_LEN(sp, sense_len);
+
+	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+		sense_len = SCSI_SENSE_BUFFERSIZE;
+
+	SET_CMD_SENSE_LEN(sp, sense_len);
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+	track_sense_len = sense_len;
+
+	if (sense_len > par_sense_len)
+		sense_len = par_sense_len;
+
+	memcpy(cp->sense_buffer, sense_data, sense_len);
+
+	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
+
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+	track_sense_len -= sense_len;
+	SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+	ql_dbg(ql_dbg_io, vha, 0x304d,
+	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
+	    sense_len, par_sense_len, track_sense_len);
+	if (GET_FW_SENSE_LEN(sp) > 0) {
+		rsp->status_srb = sp;
+		cp->result = res;
+	}
+
+	if (sense_len) {
+		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
+		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
+		    sp->vha->host_no, cp->device->id, cp->device->lun,
+		    cp);
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
+		    cp->sense_buffer, sense_len);
+	}
+}
+
+static void
+qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
+		      __le16 sstatus, __le16 cpstatus)
+{
+	struct srb_iocb *tmf;
+
+	tmf = &sp->u.iocb_cmd;
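+	/*
+	 * Treat any non-complete status, or the presence of response info,
+	 * as an incomplete task management completion.
+	 */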
+	if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
+	    (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
+		cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
+	tmf->u.tmf.comp_status = cpstatus;
+	sp->done(sp, 0);
+}
+
+static void
+qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+			 struct abort_iocb_entry_fx00 *pkt)
+{
+	const char func[] = "ABT_IOCB";
+	srb_t *sp;
+	struct srb_iocb *abt;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	abt = &sp->u.iocb_cmd;
+	abt->u.abt.comp_status = pkt->tgt_id_sts;
+	sp->done(sp, 0);
+}
+
+static void
+qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
+			 struct ioctl_iocb_entry_fx00 *pkt)
+{
+	const char func[] = "IOSB_IOCB";
+	srb_t *sp;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_reply *bsg_reply;
+	struct srb_iocb *iocb_job;
+	int res;
+	struct qla_mt_iocb_rsp_fx00 fstatus;
+	uint8_t	*fw_sts_ptr;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	if (sp->type == SRB_FXIOCB_DCMD) {
+		iocb_job = &sp->u.iocb_cmd;
+		iocb_job->u.fxiocb.seq_number = pkt->seq_no;
+		iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
+		iocb_job->u.fxiocb.result = pkt->status;
+		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
+			iocb_job->u.fxiocb.req_data =
+			    pkt->dataword_r;
+	} else {
+		bsg_job = sp->u.bsg_job;
+		bsg_reply = bsg_job->reply;
+
+		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+		fstatus.reserved_1 = pkt->reserved_0;
+		fstatus.func_type = pkt->comp_func_num;
+		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
+		fstatus.ioctl_data = pkt->dataword_r;
+		fstatus.adapid = pkt->adapid;
+		fstatus.reserved_2 = pkt->dataword_r_extra;
+		fstatus.res_count = pkt->residuallen;
+		fstatus.status = pkt->status;
+		fstatus.seq_number = pkt->seq_no;
+		memcpy(fstatus.reserved_3,
+		    pkt->reserved_2, 20 * sizeof(uint8_t));
+
+		fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
+		    sizeof(struct fc_bsg_reply);
+
+		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
+		    sizeof(struct qla_mt_iocb_rsp_fx00));
+		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+			sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
+
+		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+		    sp->fcport->vha, 0x5080,
+		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+
+		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+		    sp->fcport->vha, 0x5074,
+		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+		res = bsg_reply->result = DID_OK << 16;
+		bsg_reply->reply_payload_rcv_len =
+		    bsg_job->reply_payload.payload_len;
+	}
+	sp->done(sp, res);
+}
+
+/**
+ * qlafx00_status_entry() - Process a Status IOCB entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+	srb_t		*sp;
+	fc_port_t	*fcport;
+	struct scsi_cmnd *cp;
+	struct sts_entry_fx00 *sts;
+	__le16		comp_status;
+	__le16		scsi_status;
+	__le16		lscsi_status;
+	int32_t		resid;
+	uint32_t	sense_len, par_sense_len, rsp_info_len, resid_len,
+	    fw_resid_len;
+	uint8_t		*rsp_info = NULL, *sense_data = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t hindex, handle;
+	uint16_t que;
+	struct req_que *req;
+	int logit = 1;
+	int res = 0;
+
+	sts = (struct sts_entry_fx00 *) pkt;
+
+	comp_status = sts->comp_status;
+	scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
+	hindex = sts->handle;
+	handle = LSW(hindex);
+
+	que = MSW(hindex);
+	req = ha->req_q_map[que];
+
+	/* Validate handle. */
+	if (handle < req->num_outstanding_cmds)
+		sp = req->outstanding_cmds[handle];
+	else
+		sp = NULL;
+
+	if (sp == NULL) {
+		ql_dbg(ql_dbg_io, vha, 0x3034,
+		    "Invalid status handle (0x%x).\n", handle);
+
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		return;
+	}
+
+	if (sp->type == SRB_TM_CMD) {
+		req->outstanding_cmds[handle] = NULL;
+		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
+		    scsi_status, comp_status);
+		return;
+	}
+
+	/* Fast path completion. */
+	if (comp_status == CS_COMPLETE && scsi_status == 0) {
+		qla2x00_process_completed_request(vha, req, handle);
+		return;
+	}
+
+	req->outstanding_cmds[handle] = NULL;
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_dbg(ql_dbg_io, vha, 0x3048,
+		    "Command already returned (0x%x/%p).\n",
+		    handle, sp);
+
+		return;
+	}
+
+	lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
+
+	fcport = sp->fcport;
+
+	sense_len = par_sense_len = rsp_info_len = resid_len =
+		fw_resid_len = 0;
+	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
+		sense_len = sts->sense_len;
+	if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
+	    | (uint16_t)SS_RESIDUAL_OVER)))
+		resid_len = le32_to_cpu(sts->residual_len);
+	if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
+		fw_resid_len = le32_to_cpu(sts->residual_len);
+	rsp_info = sense_data = sts->data;
+	par_sense_len = sizeof(sts->data);
+
+	/* Check for overrun. */
+	if (comp_status == CS_COMPLETE &&
+	    scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
+		comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
+
+	/*
+	 * Based on Host and scsi status generate status code for Linux
+	 */
+	switch (le16_to_cpu(comp_status)) {
+	case CS_COMPLETE:
+	case CS_QUEUE_FULL:
+		if (scsi_status == 0) {
+			res = DID_OK << 16;
+			break;
+		}
+		if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
+		    | (uint16_t)SS_RESIDUAL_OVER))) {
+			resid = resid_len;
+			scsi_set_resid(cp, resid);
+
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			     cp->underflow)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
+				    "Mid-layer underflow "
+				    "detected (0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16;
+				break;
+			}
+		}
+		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
+
+		if (lscsi_status ==
+		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
+			    "QUEUE FULL detected.\n");
+			break;
+		}
+		logit = 0;
+		if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
+			break;
+
+		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+		if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
+			break;
+
+		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+		    rsp, res);
+		break;
+
+	case CS_DATA_UNDERRUN:
+		/* Use F/W calculated residual length. */
+		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+			resid = fw_resid_len;
+		else
+			resid = resid_len;
+		scsi_set_resid(cp, resid);
+		if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
+			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+			    && fw_resid_len != resid_len) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
+				    "Dropped frame(s) detected "
+				    "(0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16 |
+				    le16_to_cpu(lscsi_status);
+				goto check_scsi_status;
+			}
+
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			    cp->underflow)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
+				    "Mid-layer underflow "
+				    "detected (0x%x of 0x%x bytes, "
+				    "cp->underflow: 0x%x).\n",
+				    resid, scsi_bufflen(cp), cp->underflow);
+
+				res = DID_ERROR << 16;
+				break;
+			}
+		} else if (lscsi_status !=
+		    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
+		    lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
+			/*
+			 * A SCSI status of TASK SET FULL or BUSY means the
+			 * task did not complete.
+			 */
+
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
+			    "Dropped frame(s) detected (0x%x "
+			    "of 0x%x bytes).\n", resid,
+			    scsi_bufflen(cp));
+
+			res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
+			goto check_scsi_status;
+		} else {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
+			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+			    scsi_status, lscsi_status);
+		}
+
+		res = DID_OK << 16 | le16_to_cpu(lscsi_status);
+		logit = 0;
+
+check_scsi_status:
+		/*
+		 * Check to see if SCSI Status is non-zero. If so, report SCSI
+		 * Status.
+		 */
+		if (lscsi_status != 0) {
+			if (lscsi_status ==
+			    cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
+				    "QUEUE FULL detected.\n");
+				logit = 1;
+				break;
+			}
+			if (lscsi_status !=
+			    cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
+				break;
+
+			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+			if (!(scsi_status &
+			    cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
+				break;
+
+			qlafx00_handle_sense(sp, sense_data, par_sense_len,
+			    sense_len, rsp, res);
+		}
+		break;
+
+	case CS_PORT_LOGGED_OUT:
+	case CS_PORT_CONFIG_CHG:
+	case CS_PORT_BUSY:
+	case CS_INCOMPLETE:
+	case CS_PORT_UNAVAILABLE:
+	case CS_TIMEOUT:
+	case CS_RESET:
+
+		/*
+		 * We are going to have the fc class block the rport
+		 * while we try to recover so instruct the mid layer
+		 * to requeue until the class decides how to handle this.
+		 */
+		res = DID_TRANSPORT_DISRUPTED << 16;
+
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
+		    "Port down status: port-state=0x%x.\n",
+		    atomic_read(&fcport->state));
+
+		if (atomic_read(&fcport->state) == FCS_ONLINE)
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+		break;
+
+	case CS_ABORTED:
+		res = DID_RESET << 16;
+		break;
+
+	default:
+		res = DID_ERROR << 16;
+		break;
+	}
+
+	if (logit)
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
+		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
+		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
+		    "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
+		    comp_status, scsi_status, res, vha->host_no,
+		    cp->device->id, cp->device->lun, fcport->tgt_id,
+		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
+		    rsp_info, resid_len, fw_resid_len, sense_len,
+		    par_sense_len, rsp_info_len);
+
+	if (rsp->status_srb == NULL)
+		sp->done(sp, res);
+}
+
+/**
+ * qlafx00_status_cont_entry() - Process a Status Continuations entry.
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+	uint8_t	sense_sz = 0;
+	struct qla_hw_data *ha = rsp->hw;
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+	srb_t *sp = rsp->status_srb;
+	struct scsi_cmnd *cp;
+	uint32_t sense_len;
+	uint8_t *sense_ptr;
+
+	if (!sp) {
+		ql_dbg(ql_dbg_io, vha, 0x3037,
+		    "no SP, sp = %p\n", sp);
+		return;
+	}
+
+	if (!GET_FW_SENSE_LEN(sp)) {
+		ql_dbg(ql_dbg_io, vha, 0x304b,
+		    "no fw sense data, sp = %p\n", sp);
+		return;
+	}
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_log(ql_log_warn, vha, 0x303b,
+		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+		rsp->status_srb = NULL;
+		return;
+	}
+
+	if (!GET_CMD_SENSE_LEN(sp)) {
+		ql_dbg(ql_dbg_io, vha, 0x304c,
+		    "no sense data, sp = %p\n", sp);
+	} else {
+		sense_len = GET_CMD_SENSE_LEN(sp);
+		sense_ptr = GET_CMD_SENSE_PTR(sp);
+		ql_dbg(ql_dbg_io, vha, 0x304f,
+		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
+		    sp, sense_len, sense_ptr);
+
+		if (sense_len > sizeof(pkt->data))
+			sense_sz = sizeof(pkt->data);
+		else
+			sense_sz = sense_len;
+
+		/* Move sense data. */
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
+		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+		memcpy(sense_ptr, pkt->data, sense_sz);
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
+		    sense_ptr, sense_sz);
+
+		sense_len -= sense_sz;
+		sense_ptr += sense_sz;
+
+		SET_CMD_SENSE_PTR(sp, sense_ptr);
+		SET_CMD_SENSE_LEN(sp, sense_len);
+	}
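+	/* Account for the firmware sense bytes consumed by this entry. */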
+	sense_len = GET_FW_SENSE_LEN(sp);
+	sense_len = (sense_len > sizeof(pkt->data)) ?
+	    (sense_len - sizeof(pkt->data)) : 0;
+	SET_FW_SENSE_LEN(sp, sense_len);
+
+	/* Place command on done queue. */
+	if (sense_len == 0) {
+		rsp->status_srb = NULL;
+		sp->done(sp, cp->result);
+	}
+}
+
+/**
+ * qlafx00_multistatus_entry() - Process Multi response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, void *pkt)
+{
+	srb_t		*sp;
+	struct multi_sts_entry_fx00 *stsmfx;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t handle, hindex, handle_count, i;
+	uint16_t que;
+	struct req_que *req;
+	__le32 *handle_ptr;
+
+	stsmfx = (struct multi_sts_entry_fx00 *) pkt;
+
+	handle_count = stsmfx->handle_count;
+
+	if (handle_count > MAX_HANDLE_COUNT) {
+		ql_dbg(ql_dbg_io, vha, 0x3035,
+		    "Invalid handle count (0x%x).\n", handle_count);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		return;
+	}
+
+	handle_ptr =  &stsmfx->handles[0];
+
+	for (i = 0; i < handle_count; i++) {
+		hindex = le32_to_cpu(*handle_ptr);
+		handle = LSW(hindex);
+		que = MSW(hindex);
+		req = ha->req_q_map[que];
+
+		/* Validate handle. */
+		if (handle < req->num_outstanding_cmds)
+			sp = req->outstanding_cmds[handle];
+		else
+			sp = NULL;
+
+		if (sp == NULL) {
+			ql_dbg(ql_dbg_io, vha, 0x3044,
+			    "Invalid status handle (0x%x).\n", handle);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			return;
+		}
+		qla2x00_process_completed_request(vha, req, handle);
+		handle_ptr++;
+	}
+}
+
+/**
+ * qlafx00_error_entry() - Process an error entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ * @estatus: entry status from the IOCB
+ * @etype: entry type of the IOCB
+ */
+static void
+qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
+		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	const char func[] = "ERROR-IOCB";
+	uint16_t que = 0;
+	struct req_que *req = NULL;
+	int res = DID_ERROR << 16;
+
+	ql_dbg(ql_dbg_async, vha, 0x507f,
+	    "type of error status in response: 0x%x\n", estatus);
+
+	req = ha->req_q_map[que];
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (sp) {
+		sp->done(sp, res);
+		return;
+	}
+
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+}
+
+/**
+ * qlafx00_process_response_queue() - Process response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ */
+static void
+qlafx00_process_response_queue(struct scsi_qla_host *vha,
+	struct rsp_que *rsp)
+{
+	struct sts_entry_fx00 *pkt;
+	response_t *lptr;
+	uint16_t lreq_q_in = 0;
+	uint16_t lreq_q_out = 0;
+
+	lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+	lreq_q_out = rsp->ring_index;
+
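+	/*
+	 * Copy out and process entries until the local out index catches up
+	 * with the chip's in index.
+	 */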
+	while (lreq_q_in != lreq_q_out) {
+		lptr = rsp->ring_ptr;
+		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
+		    sizeof(rsp->rsp_pkt));
+		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
+
+		rsp->ring_index++;
+		lreq_q_out++;
+		if (rsp->ring_index == rsp->length) {
+			lreq_q_out = 0;
+			rsp->ring_index = 0;
+			rsp->ring_ptr = rsp->ring;
+		} else {
+			rsp->ring_ptr++;
+		}
+
+		if (pkt->entry_status != 0 &&
+		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+			qlafx00_error_entry(vha, rsp,
+			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
+			    pkt->entry_type);
+			continue;
+		}
+
+		switch (pkt->entry_type) {
+		case STATUS_TYPE_FX00:
+			qlafx00_status_entry(vha, rsp, pkt);
+			break;
+
+		case STATUS_CONT_TYPE_FX00:
+			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+			break;
+
+		case MULTI_STATUS_TYPE_FX00:
+			qlafx00_multistatus_entry(vha, rsp, pkt);
+			break;
+
+		case ABORT_IOCB_TYPE_FX00:
+			qlafx00_abort_iocb_entry(vha, rsp->req,
+			   (struct abort_iocb_entry_fx00 *)pkt);
+			break;
+
+		case IOCTL_IOSB_TYPE_FX00:
+			qlafx00_ioctl_iosb_entry(vha, rsp->req,
+			    (struct ioctl_iocb_entry_fx00 *)pkt);
+			break;
+		default:
+			/* Type Not Supported. */
+			ql_dbg(ql_dbg_async, vha, 0x5081,
+			    "Received unknown response pkt type %x "
+			    "entry status=%x.\n",
+			    pkt->entry_type, pkt->entry_status);
+			break;
+		}
+	}
+
+	/* Adjust ring index */
+	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+/**
+ * qlafx00_async_event() - Process asynchronous events.
+ * @vha: SCSI driver HA context
+ */
+static void
+qlafx00_async_event(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg;
+	int data_size = 1;
+
+	reg = &ha->iobase->ispfx00;
+	/* Dispatch based on the asynchronous event code. */
+	switch (ha->aenmb[0]) {
+	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
+		ql_log(ql_log_warn, vha, 0x5079,
+		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
+		ql_dbg(ql_dbg_async, vha, 0x5076,
+		    "Asynchronous FW shutdown requested.\n");
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		break;
+
+	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
+		ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
+		ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
+		ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+		ql_dbg(ql_dbg_async, vha, 0x5077,
+		    "Asynchronous port Update received "
+		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
+		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
+		data_size = 4;
+		break;
+
+	case QLAFX00_MBA_TEMP_OVER:	/* Over temperature event */
+		ql_log(ql_log_info, vha, 0x5085,
+		    "Asynchronous over temperature event received "
+		    "aenmb[0]: %x\n",
+		    ha->aenmb[0]);
+		break;
+
+	case QLAFX00_MBA_TEMP_NORM:	/* Normal temperature event */
+		ql_log(ql_log_info, vha, 0x5086,
+		    "Asynchronous normal temperature event received "
+		    "aenmb[0]: %x\n",
+		    ha->aenmb[0]);
+		break;
+
+	case QLAFX00_MBA_TEMP_CRIT:	/* Critical temperature event */
+		ql_log(ql_log_info, vha, 0x5083,
+		    "Asynchronous critical temperature event received "
+		    "aenmb[0]: %x\n",
+		ha->aenmb[0]);
+		break;
+
+	default:
+		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
+		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
+		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
+		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+		ql_dbg(ql_dbg_async, vha, 0x5078,
+		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
+		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
+		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
+		break;
+	}
+	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
+	    (uint32_t *)ha->aenmb, data_size);
+}
+
+/**
+ * qlafx00_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: value of the Mailbox0 register
+ */
+static void
+qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
+{
+	uint16_t	cnt;
+	uint32_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+	if (!ha->mcp32)
+		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
+
+	/* Load return mailbox registers. */
+	ha->flags.mbox_int = 1;
+	ha->mailbox_out32[0] = mb0;
+	wptr = (uint32_t __iomem *)&reg->mailbox17;
+
+	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+		ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+		wptr++;
+	}
+}
+
+/**
+ * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qlafx00_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct device_reg_fx00 __iomem *reg;
+	int		status;
+	unsigned long	iter;
+	uint32_t	stat;
+	uint32_t	mb[8];
+	struct rsp_que *rsp;
+	unsigned long	flags;
+	uint32_t clr_intr = 0;
+	uint32_t intr_stat = 0;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x507d,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->ispfx00;
+	status = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
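+	/*
+	 * Service at most 50 events per invocation; clr_intr collects the
+	 * interrupt status bits handled on each pass so they can be cleared.
+	 */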
+	for (iter = 50; iter--; clr_intr = 0) {
+		stat = QLAFX00_RD_INTR_REG(ha);
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
+			break;
+		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
+		if (!intr_stat)
+			break;
+
+		if (stat & QLAFX00_INTR_MB_CMPLT) {
+			mb[0] = RD_REG_WORD(&reg->mailbox16);
+			qlafx00_mbx_completion(vha, mb[0]);
+			status |= MBX_INTERRUPT;
+			clr_intr |= QLAFX00_INTR_MB_CMPLT;
+		}
+		if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
+			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+			qlafx00_async_event(vha);
+			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
+		}
+		if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
+			qlafx00_process_response_queue(vha, rsp);
+			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
+		}
+
+		QLAFX00_CLR_INTR_REG(ha, clr_intr);
+		QLAFX00_RD_INTR_REG(ha);
+	}
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* QLAFX00 specific IOCB implementation functions */
+
+static inline cont_a64_entry_t *
+qlafx00_prep_cont_type1_iocb(struct req_que *req,
+			     cont_a64_entry_t *lcont_pkt)
+{
+	cont_a64_entry_t *cont_pkt;
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+	/* Load packet defaults. */
+	lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
+
+	return cont_pkt;
+}
+
+static inline void
+qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
+			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
+{
+	uint16_t	avail_dsds;
+	__le32 *cur_dsd;
+	scsi_qla_host_t	*vha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i, cont;
+	struct req_que *req;
+	cont_a64_entry_t lcont_pkt;
+	cont_a64_entry_t *cont_pkt;
+
+	vha = sp->vha;
+	req = vha->req;
+
+	cmd = GET_CMD_SP(sp);
+	cont = 0;
+	cont_pkt = NULL;
+
+	/* Update entry type to indicate Command Type 7 IOCB */
+	lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		lcmd_pkt->byte_count = cpu_to_le32(0);
+		return;
+	}
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		lcmd_pkt->cntrl_flags = TMF_READ_DATA;
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+	}
+
+	/* One DSD is available in the Command Type 7 IOCB */
+	avail_dsds = 1;
+	cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+
+	/* Load data segments */
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
+			cont_pkt =
+			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
+			cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
+			avail_dsds = 5;
+			cont = 1;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+		if (avail_dsds == 0 && cont == 1) {
+			cont = 0;
+			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+			    REQUEST_ENTRY_SIZE);
+		}
+
+	}
+	if (avail_dsds != 0 && cont == 1) {
+		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+		    REQUEST_ENTRY_SIZE);
+	}
+}
+
+/**
+ * qlafx00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qlafx00_start_scsi(srb_t *sp)
+{
+	int		nseg;
+	unsigned long   flags;
+	uint32_t        index;
+	uint32_t	handle;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct cmd_type_7_fx00 *cmd_pkt;
+	struct cmd_type_7_fx00 lcmd_pkt;
+	struct scsi_lun llun;
+
+	/* Setup device pointers. */
+	rsp = ha->rsp_q_map[0];
+	req = vha->req;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
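+	/* Handles are 1-based; slot 0 of outstanding_cmds is never used. */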
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
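+	/*
+	 * Refresh the free-slot count from the hardware out pointer when
+	 * running low; e.g. length = 512, ring_index = 500 and an out
+	 * pointer of 10 leave 512 - (500 - 10) = 22 free entries.
+	 */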
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
+
+	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
+
+	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+	lcmd_pkt.reserved_0 = 0;
+	lcmd_pkt.port_path_ctrl = 0;
+	lcmd_pkt.reserved_1 = 0;
+	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
+	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
+
+	int_to_scsilun(cmd->device->lun, &llun);
+	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
+	    sizeof(lcmd_pkt.lun));
+
+	/* Load SCSI command packet. */
+	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
+	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+	/* Build IOCB segments */
+	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
+
+	/* Set total data segment count. */
+	lcmd_pkt.entry_count = (uint8_t)req_cnt;
+
+	/* Specify response queue number where completion should happen */
+	lcmd_pkt.entry_status = (uint8_t)rsp->id;
+
+	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
+	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
+	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+
+	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
+void
+qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
+{
+	struct srb_iocb *fxio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->vha;
+	struct req_que *req = vha->req;
+	struct tsk_mgmt_entry_fx00 tm_iocb;
+	struct scsi_lun llun;
+
+	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
+	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
+	tm_iocb.entry_count = 1;
+	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	tm_iocb.reserved_0 = 0;
+	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
+	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
+	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
+		int_to_scsilun(fxio->u.tmf.lun, &llun);
+		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
+		    sizeof(struct scsi_lun));
+	}
+
+	memcpy((void *)ptm_iocb, &tm_iocb,
+	    sizeof(struct tsk_mgmt_entry_fx00));
+	wmb();
+}
+
+void
+qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
+{
+	struct srb_iocb *fxio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->vha;
+	struct req_que *req = vha->req;
+	struct abort_iocb_entry_fx00 abt_iocb;
+
+	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
+	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
+	abt_iocb.entry_count = 1;
+	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb.abort_handle =
+	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
+	abt_iocb.req_que_no = cpu_to_le16(req->id);
+
+	memcpy((void *)pabt_iocb, &abt_iocb,
+	    sizeof(struct abort_iocb_entry_fx00));
+	wmb();
+}
+
+void
+qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
+{
+	struct srb_iocb *fxio = &sp->u.iocb_cmd;
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_request *bsg_request;
+	struct fxdisc_entry_fx00 fx_iocb;
+	uint8_t entry_cnt = 1;
+
+	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
+	fx_iocb.entry_type = FX00_IOCB_TYPE;
+	fx_iocb.handle = cpu_to_le32(sp->handle);
+	fx_iocb.entry_count = entry_cnt;
+
+	if (sp->type == SRB_FXIOCB_DCMD) {
+		fx_iocb.func_num =
+		    sp->u.iocb_cmd.u.fxiocb.req_func_type;
+		fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
+		fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
+		fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
+		fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
+		fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;
+
+		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+			fx_iocb.req_dsdcnt = cpu_to_le16(1);
+			fx_iocb.req_xfrcnt =
+			    cpu_to_le16(fxio->u.fxiocb.req_len);
+			fx_iocb.dseg_rq_address[0] =
+			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
+			fx_iocb.dseg_rq_address[1] =
+			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
+			fx_iocb.dseg_rq_len =
+			    cpu_to_le32(fxio->u.fxiocb.req_len);
+		}
+
+		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
+			fx_iocb.rsp_xfrcnt =
+			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
+			fx_iocb.dseg_rsp_address[0] =
+			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
+			fx_iocb.dseg_rsp_address[1] =
+			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
+			fx_iocb.dseg_rsp_len =
+			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
+		}
+
+		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
+			fx_iocb.dataword = fxio->u.fxiocb.req_data;
+		}
+		fx_iocb.flags = fxio->u.fxiocb.flags;
+	} else {
+		struct scatterlist *sg;
+		bsg_job = sp->u.bsg_job;
+		bsg_request = bsg_job->request;
+		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+			&bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+
+		fx_iocb.func_num = piocb_rqst->func_type;
+		fx_iocb.adapid = piocb_rqst->adapid;
+		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
+		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
+		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
+		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
+		fx_iocb.dataword = piocb_rqst->dataword;
+		fx_iocb.req_xfrcnt = piocb_rqst->req_len;
+		fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;
+
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+			int avail_dsds, tot_dsds;
+			cont_a64_entry_t lcont_pkt;
+			cont_a64_entry_t *cont_pkt = NULL;
+			__le32 *cur_dsd;
+			int index = 0, cont = 0;
+
+			fx_iocb.req_dsdcnt =
+			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
+			tot_dsds =
+			    bsg_job->request_payload.sg_cnt;
+			cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
+			avail_dsds = 1;
+			for_each_sg(bsg_job->request_payload.sg_list, sg,
+			    tot_dsds, index) {
+				dma_addr_t sle_dma;
+
+				/* Allocate additional continuation packets? */
+				if (avail_dsds == 0) {
+					/*
+					 * Five DSDs are available in the Cont.
+					 * Type 1 IOCB.
+					 */
+					memset(&lcont_pkt, 0,
+					    REQUEST_ENTRY_SIZE);
+					cont_pkt =
+					    qlafx00_prep_cont_type1_iocb(
+						sp->vha->req, &lcont_pkt);
+					cur_dsd = (__le32 *)
+					    lcont_pkt.dseg_0_address;
+					avail_dsds = 5;
+					cont = 1;
+					entry_cnt++;
+				}
+
+				sle_dma = sg_dma_address(sg);
+				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+				avail_dsds--;
+
+				if (avail_dsds == 0 && cont == 1) {
+					cont = 0;
+					memcpy_toio(
+					    (void __iomem *)cont_pkt,
+					    &lcont_pkt, REQUEST_ENTRY_SIZE);
+					ql_dump_buffer(
+					    ql_dbg_user + ql_dbg_verbose,
+					    sp->vha, 0x3042,
+					    (uint8_t *)&lcont_pkt,
+					     REQUEST_ENTRY_SIZE);
+				}
+			}
+			if (avail_dsds != 0 && cont == 1) {
+				memcpy_toio((void __iomem *)cont_pkt,
+				    &lcont_pkt, REQUEST_ENTRY_SIZE);
+				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+				    sp->vha, 0x3043,
+				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+			}
+		}
+
+		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+			int avail_dsds, tot_dsds;
+			cont_a64_entry_t lcont_pkt;
+			cont_a64_entry_t *cont_pkt = NULL;
+			__le32 *cur_dsd;
+			int index = 0, cont = 0;
+
+			fx_iocb.rsp_dsdcnt =
+			   cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+			tot_dsds = bsg_job->reply_payload.sg_cnt;
+			cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
+			avail_dsds = 1;
+
+			for_each_sg(bsg_job->reply_payload.sg_list, sg,
+			    tot_dsds, index) {
+				dma_addr_t sle_dma;
+
+				/* Allocate additional continuation packets? */
+				if (avail_dsds == 0) {
+					/*
+					 * Five DSDs are available in the Cont.
+					 * Type 1 IOCB.
+					 */
+					memset(&lcont_pkt, 0,
+					    REQUEST_ENTRY_SIZE);
+					cont_pkt =
+					    qlafx00_prep_cont_type1_iocb(
+						sp->vha->req, &lcont_pkt);
+					cur_dsd = (__le32 *)
+					    lcont_pkt.dseg_0_address;
+					avail_dsds = 5;
+					cont = 1;
+					entry_cnt++;
+				}
+
+				sle_dma = sg_dma_address(sg);
+				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+				avail_dsds--;
+
+				if (avail_dsds == 0 && cont == 1) {
+					cont = 0;
+					memcpy_toio((void __iomem *)cont_pkt,
+					    &lcont_pkt,
+					    REQUEST_ENTRY_SIZE);
+					ql_dump_buffer(
+					    ql_dbg_user + ql_dbg_verbose,
+					    sp->vha, 0x3045,
+					    (uint8_t *)&lcont_pkt,
+					    REQUEST_ENTRY_SIZE);
+				}
+			}
+			if (avail_dsds != 0 && cont == 1) {
+				memcpy_toio((void __iomem *)cont_pkt,
+				    &lcont_pkt, REQUEST_ENTRY_SIZE);
+				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+				    sp->vha, 0x3046,
+				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+			}
+		}
+
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
+			fx_iocb.dataword = piocb_rqst->dataword;
+		fx_iocb.flags = piocb_rqst->flags;
+		fx_iocb.entry_count = entry_cnt;
+	}
+
+	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+	    sp->vha, 0x3047,
+	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+
+	memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
+	    sizeof(struct fxdisc_entry_fx00));
+	wmb();
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mr.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mr.h
new file mode 100644
index 0000000..aeaa1b4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_mr.h
@@ -0,0 +1,527 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_MR_H
+#define __QLA_MR_H
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISPF001		0xF001
+
+/* FX00 specific definitions */
+
+#define FX00_COMMAND_TYPE_7	0x07	/* Command Type 7 entry for 7XXX */
+struct cmd_type_7_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint8_t reserved_0;
+	uint8_t port_path_ctrl;
+	uint16_t reserved_1;
+
+	__le16 tgt_idx;		/* Target Idx. */
+	uint16_t timeout;		/* Command timeout. */
+
+	__le16 dseg_count;		/* Data segment count. */
+	uint8_t	scsi_rsp_dsd_len;
+	uint8_t reserved_2;
+
+	struct scsi_lun lun;		/* LUN (LE). */
+
+	uint8_t cntrl_flags;
+
+	uint8_t task_mgmt_flags;	/* Task management flags. */
+
+	uint8_t task;
+
+	uint8_t crn;
+
+	uint8_t fcp_cdb[MAX_CMDSZ];	/* SCSI command words. */
+	__le32 byte_count;		/* Total byte count. */
+
+	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_0_len;		/* Data segment 0 length. */
+};
+
+#define	STATUS_TYPE_FX00	0x01		/* Status entry. */
+struct sts_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t reserved_3;		/* System handle. */
+
+	__le16 comp_status;		/* Completion status. */
+	uint16_t reserved_0;		/* OX_ID used by the firmware. */
+
+	__le32 residual_len;		/* FW calc residual transfer length. */
+
+	uint16_t reserved_1;
+	uint16_t state_flags;		/* State flags. */
+
+	uint16_t reserved_2;
+	__le16 scsi_status;		/* SCSI status. */
+
+	uint32_t sense_len;		/* FCP SENSE length. */
+	uint8_t data[32];		/* FCP response/sense information. */
+};
+
+
+#define MAX_HANDLE_COUNT	15
+#define MULTI_STATUS_TYPE_FX00	0x0D
+
+struct multi_sts_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;
+	uint8_t entry_status;
+
+	__le32 handles[MAX_HANDLE_COUNT];
+};
+
+#define TSK_MGMT_IOCB_TYPE_FX00		0x05
+struct tsk_mgmt_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;
+	uint8_t entry_status;		/* Entry Status. */
+
+	__le32 handle;		/* System handle. */
+
+	uint32_t reserved_0;
+
+	__le16 tgt_id;		/* Target Idx. */
+
+	uint16_t reserved_1;
+	uint16_t reserved_3;
+	uint16_t reserved_4;
+
+	struct scsi_lun lun;		/* LUN (LE). */
+
+	__le32 control_flags;		/* Control Flags. */
+
+	uint8_t reserved_2[32];
+};
+
+
+#define	ABORT_IOCB_TYPE_FX00	0x08		/* Abort IOCB status. */
+struct abort_iocb_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	__le32 handle;		/* System handle. */
+	__le32 reserved_0;
+
+	__le16 tgt_id_sts;		/* Completion status. */
+	__le16 options;
+
+	__le32 abort_handle;		/* System handle. */
+	__le32 reserved_2;
+
+	__le16 req_que_no;
+	uint8_t reserved_1[38];
+};
+
+#define IOCTL_IOSB_TYPE_FX00	0x0C
+struct ioctl_iocb_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t reserved_0;		/* System handle. */
+
+	uint16_t comp_func_num;
+	__le16 fw_iotcl_flags;
+
+	__le32 dataword_r;		/* Data word returned */
+	uint32_t adapid;		/* Adapter ID */
+	uint32_t dataword_r_extra;
+
+	__le32 seq_no;
+	uint8_t reserved_2[20];
+	uint32_t residuallen;
+	__le32 status;
+};
+
+#define STATUS_CONT_TYPE_FX00 0x04
+
+#define FX00_IOCB_TYPE		0x0B
+struct fxdisc_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	__le32 handle;		/* System handle. */
+	__le32 reserved_0;		/* System handle. */
+
+	__le16 func_num;
+	__le16 req_xfrcnt;
+	__le16 req_dsdcnt;
+	__le16 rsp_xfrcnt;
+	__le16 rsp_dsdcnt;
+	uint8_t flags;
+	uint8_t reserved_1;
+
+	__le32 dseg_rq_address[2];	/* Data segment 0 address. */
+	__le32 dseg_rq_len;		/* Data segment 0 length. */
+	__le32 dseg_rsp_address[2];	/* Data segment 1 address. */
+	__le32 dseg_rsp_len;		/* Data segment 1 length. */
+
+	__le32 dataword;
+	__le32 adapid;
+	__le32 adapid_hi;
+	__le32 dataword_extra;
+};
+
+struct qlafx00_tgt_node_info {
+	uint8_t tgt_node_wwpn[WWN_SIZE];
+	uint8_t tgt_node_wwnn[WWN_SIZE];
+	uint32_t tgt_node_state;
+	uint8_t reserved[128];
+	uint32_t reserved_1[8];
+	uint64_t reserved_2[4];
+} __packed;
+
+#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)
+
+#define QLAFX00_LINK_STATUS_DOWN	0x10
+#define QLAFX00_LINK_STATUS_UP		0x11
+
+#define QLAFX00_PORT_SPEED_2G	0x2
+#define QLAFX00_PORT_SPEED_4G	0x4
+#define QLAFX00_PORT_SPEED_8G	0x8
+#define QLAFX00_PORT_SPEED_10G	0xa
+struct port_info_data {
+	uint8_t         port_state;
+	uint8_t         port_type;
+	uint16_t        port_identifier;
+	uint32_t        up_port_state;
+	uint8_t         fw_ver_num[32];
+	uint8_t         portal_attrib;
+	uint16_t        host_option;
+	uint8_t         reset_delay;
+	uint8_t         pdwn_retry_cnt;
+	uint16_t        max_luns2tgt;
+	uint8_t         risc_ver;
+	uint8_t         pconn_option;
+	uint16_t        risc_option;
+	uint16_t        max_frame_len;
+	uint16_t        max_iocb_alloc;
+	uint16_t        exec_throttle;
+	uint8_t         retry_cnt;
+	uint8_t         retry_delay;
+	uint8_t         port_name[8];
+	uint8_t         port_id[3];
+	uint8_t         link_status;
+	uint8_t         plink_rate;
+	uint32_t        link_config;
+	uint16_t        adap_haddr;
+	uint8_t         tgt_disc;
+	uint8_t         log_tout;
+	uint8_t         node_name[8];
+	uint16_t        erisc_opt1;
+	uint8_t         resp_acc_tmr;
+	uint8_t         intr_del_tmr;
+	uint8_t         erisc_opt2;
+	uint8_t         alt_port_name[8];
+	uint8_t         alt_node_name[8];
+	uint8_t         link_down_tout;
+	uint8_t         conn_type;
+	uint8_t         fc_fw_mode;
+	uint32_t        uiReserved[48];
+} __packed;
+
+/* OS Type Designations */
+#define OS_TYPE_UNKNOWN             0
+#define OS_TYPE_LINUX               2
+
+/* Linux Info */
+#define SYSNAME_LENGTH              128
+#define NODENAME_LENGTH             64
+#define RELEASE_LENGTH              64
+#define VERSION_LENGTH              64
+#define MACHINE_LENGTH              64
+#define DOMNAME_LENGTH              64
+
+struct host_system_info {
+	uint32_t os_type;
+	char    sysname[SYSNAME_LENGTH];
+	char    nodename[NODENAME_LENGTH];
+	char    release[RELEASE_LENGTH];
+	char    version[VERSION_LENGTH];
+	char    machine[MACHINE_LENGTH];
+	char    domainname[DOMNAME_LENGTH];
+	char    hostdriver[VERSION_LENGTH];
+	uint32_t reserved[64];
+} __packed;
+
+struct register_host_info {
+	struct host_system_info     hsi;	/* host system info */
+	uint64_t        utc;			/* UTC (system time) */
+	uint32_t        reserved[64];		/* future additions */
+} __packed;
+
+
+#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
+#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
+
+struct config_info_data {
+	uint8_t		model_num[16];
+	uint8_t		model_description[80];
+	uint8_t		reserved0[160];
+	uint8_t		symbolic_name[64];
+	uint8_t		serial_num[32];
+	uint8_t		hw_version[16];
+	uint8_t		fw_version[16];
+	uint8_t		uboot_version[16];
+	uint8_t		fru_serial_num[32];
+
+	uint8_t		fc_port_count;
+	uint8_t		iscsi_port_count;
+	uint8_t		reserved1[2];
+
+	uint8_t		mode;
+	uint8_t		log_level;
+	uint8_t		reserved2[2];
+
+	uint32_t	log_size;
+
+	uint8_t		tgt_pres_mode;
+	uint8_t		iqn_flags;
+	uint8_t		lun_mapping;
+
+	uint64_t	adapter_id;
+
+	uint32_t	cluster_key_len;
+	uint8_t		cluster_key[16];
+
+	uint64_t	cluster_master_id;
+	uint64_t	cluster_slave_id;
+	uint8_t		cluster_flags;
+	uint32_t	enabled_capabilities;
+	uint32_t	nominal_temp_value;
+} __packed;
+
+#define FXDISC_GET_CONFIG_INFO		0x01
+#define FXDISC_GET_PORT_INFO		0x02
+#define FXDISC_GET_TGT_NODE_INFO	0x80
+#define FXDISC_GET_TGT_NODE_LIST	0x81
+#define FXDISC_REG_HOST_INFO		0x99
+#define FXDISC_ABORT_IOCTL		0xff
+
+#define QLAFX00_HBA_ICNTRL_REG		0x20B08
+#define QLAFX00_ICR_ENB_MASK            0x80000000
+#define QLAFX00_ICR_DIS_MASK            0x7fffffff
+#define QLAFX00_HST_RST_REG		0x18264
+#define QLAFX00_SOC_TEMP_REG		0x184C4
+#define QLAFX00_HST_TO_HBA_REG		0x20A04
+#define QLAFX00_HBA_TO_HOST_REG		0x21B70
+#define QLAFX00_HST_INT_STS_BITS	0x7
+#define QLAFX00_BAR1_BASE_ADDR_REG	0x40018
+#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG	0x41824
+
+#define QLAFX00_INTR_MB_CMPLT		0x1
+#define QLAFX00_INTR_RSP_CMPLT		0x2
+#define QLAFX00_INTR_ASYNC_CMPLT	0x4
+
+#define QLAFX00_MBA_SYSTEM_ERR		0x8002
+#define QLAFX00_MBA_TEMP_OVER		0x8005
+#define QLAFX00_MBA_TEMP_NORM		0x8006
+#define	QLAFX00_MBA_TEMP_CRIT		0x8007
+#define QLAFX00_MBA_LINK_UP		0x8011
+#define QLAFX00_MBA_LINK_DOWN		0x8012
+#define QLAFX00_MBA_PORT_UPDATE		0x8014
+#define QLAFX00_MBA_SHUTDOWN_RQSTD	0x8062
+
+#define SOC_SW_RST_CONTROL_REG_CORE0     0x0020800
+#define SOC_FABRIC_RST_CONTROL_REG       0x0020840
+#define SOC_FABRIC_CONTROL_REG           0x0020200
+#define SOC_FABRIC_CONFIG_REG            0x0020204
+#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG  0x001820C
+
+#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG     0x0020B00
+#define SOC_CORE_TIMER_REG                     0x0021850
+#define SOC_IRQ_ACK_REG                        0x00218b4
+
+#define CONTINUE_A64_TYPE_FX00	0x03	/* Continuation entry. */
+
+#define QLAFX00_SET_HST_INTR(ha, value) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+	value)
+
+#define QLAFX00_CLR_HST_INTR(ha, value) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+	~value)
+
+#define QLAFX00_RD_INTR_REG(ha) \
+	RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+
+#define QLAFX00_CLR_INTR_REG(ha, value) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+	~value)
+
+#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
+	WRT_REG_DWORD((ha)->cregbase + off, val)
+
+#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
+	RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_HBA_RST_REG(ha, val)\
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+
+#define QLAFX00_RD_ICNTRL_REG(ha) \
+	RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+
+#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+	(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
+	 QLAFX00_ICR_ENB_MASK))
+
+#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+	(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
+	 QLAFX00_ICR_DIS_MASK))
+
+#define QLAFX00_RD_REG(ha, off) \
+	RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_WR_REG(ha, off, val) \
+	WRT_REG_DWORD((ha)->cregbase + off, val)
+
+struct qla_mt_iocb_rqst_fx00 {
+	__le32 reserved_0;
+
+	__le16 func_type;
+	uint8_t flags;
+	uint8_t reserved_1;
+
+	__le32 dataword;
+
+	__le32 adapid;
+	__le32 adapid_hi;
+
+	__le32 dataword_extra;
+
+	__le16 req_len;
+	__le16 reserved_2;
+
+	__le16 rsp_len;
+	__le16 reserved_3;
+};
+
+struct qla_mt_iocb_rsp_fx00 {
+	uint32_t reserved_1;
+
+	uint16_t func_type;
+	__le16 ioctl_flags;
+
+	__le32 ioctl_data;
+
+	uint32_t adapid;
+	uint32_t adapid_hi;
+
+	uint32_t reserved_2;
+	__le32 seq_number;
+
+	uint8_t reserved_3[20];
+
+	int32_t res_count;
+
+	__le32 status;
+};
+
+
+#define MAILBOX_REGISTER_COUNT_FX00	16
+#define AEN_MAILBOX_REGISTER_COUNT_FX00	8
+#define MAX_FIBRE_DEVICES_FX00	512
+#define MAX_LUNS_FX00		0x1024
+#define MAX_TARGETS_FX00	MAX_ISA_DEVICES
+#define REQUEST_ENTRY_CNT_FX00		512	/* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_FX00		256	/* Number of response entries.*/
+
+/*
+ * Firmware state codes for QLAFX00 adapters
+ */
+#define FSTATE_FX00_CONFIG_WAIT     0x0000	/* Waiting for driver to issue
+						 * Initialize FW Mbox cmd
+						 */
+#define FSTATE_FX00_INITIALIZED     0x1000	/* FW has been initialized by
+						 * the driver
+						 */
+
+#define FX00_DEF_RATOV	10
+
+struct mr_data_fx00 {
+	uint8_t	symbolic_name[64];
+	uint8_t	serial_num[32];
+	uint8_t	hw_version[16];
+	uint8_t	fw_version[16];
+	uint8_t	uboot_version[16];
+	uint8_t	fru_serial_num[32];
+	fc_port_t       fcport;		/* fcport used for requests
+					 * that are not linked
+					 * to a particular target
+					 */
+	uint8_t fw_hbt_en;
+	uint8_t fw_hbt_cnt;
+	uint8_t fw_hbt_miss_cnt;
+	uint32_t old_fw_hbt_cnt;
+	uint16_t fw_reset_timer_tick;
+	uint8_t fw_reset_timer_exp;
+	uint16_t fw_critemp_timer_tick;
+	uint32_t old_aenmbx0_state;
+	uint32_t critical_temperature;
+	bool extended_io_enabled;
+	bool host_info_resend;
+	uint8_t hinfo_resend_timer_tick;
+};
+
+#define QLAFX00_EXTENDED_IO_EN_MASK    0x20
+
+/*
+ * The SoC junction temperature is stored in bits 9:1 of the SoC
+ * Junction Temperature Register in a firmware-specific format.
+ * To get the temperature in degrees Celsius, the bit-field value X
+ * is converted using this formula:
+ * Temperature (degrees C) = (3153000 - (10000 * X)) / 13825
+ * This macro reads the register, extracts the bit-field value,
+ * performs the calculation and returns the temperature in Celsius.
+ */
+#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
+	((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
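+
+/*
+ * For example, a raw bit-field value of 200 yields
+ * (3153000 - 10000 * 200) / 13825 = 83 degrees Celsius.
+ */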
+
+
+#define QLAFX00_LOOP_DOWN_TIME		615     /* 600 */
+#define QLAFX00_HEARTBEAT_INTERVAL	6	/* number of seconds */
+#define QLAFX00_HEARTBEAT_MISS_CNT	3	/* number of miss */
+#define QLAFX00_RESET_INTERVAL		120	/* number of seconds */
+#define QLAFX00_MAX_RESET_INTERVAL	600	/* number of seconds */
+#define QLAFX00_CRITEMP_INTERVAL	60	/* number of seconds */
+#define QLAFX00_HINFO_RESEND_INTERVAL	60	/* number of seconds */
+
+#define QLAFX00_CRITEMP_THRSHLD		80	/* Celsius degrees */
+
+/* Max concurrent IOs that can be queued */
+#define QLAFX00_MAX_CANQUEUE		1024
+
+/* IOCTL IOCB abort success */
+#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS	0x68
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nvme.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nvme.c
new file mode 100644
index 0000000..6b33a1f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nvme.c
@@ -0,0 +1,738 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_nvme.h"
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
+static struct nvme_fc_port_template qla_nvme_fc_transport;
+
+static void qla_nvme_unregister_remote_port(struct work_struct *);
+
+int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+	struct nvme_rport *rport;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_NVME_FC))
+		return 0;
+
+	if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
+		return 0;
+
+	if (!vha->flags.nvme_enabled) {
+		ql_log(ql_log_info, vha, 0x2100,
+		    "%s: Not registering target since Host NVME is not enabled\n",
+		    __func__);
+		return 0;
+	}
+
+	if (!(fcport->nvme_prli_service_param &
+	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
+		return 0;
+
+	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
+	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
+	if (!rport) {
+		ql_log(ql_log_warn, vha, 0x2101,
+		    "%s: unable to alloc memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	rport->req.port_name = wwn_to_u64(fcport->port_name);
+	rport->req.node_name = wwn_to_u64(fcport->node_name);
+	rport->req.port_role = 0;
+
+	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
+		rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+
+	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
+		rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;
+
+	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
+		rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+	rport->req.port_id = fcport->d_id.b24;
+
+	ql_log(ql_log_info, vha, 0x2102,
+	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
+	    __func__, rport->req.node_name, rport->req.port_name,
+	    rport->req.port_id);
+
+	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
+	    &fcport->nvme_remote_port);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0x212e,
+		    "Failed to register remote port. Transport returned %d\n",
+		    ret);
+		return ret;
+	}
+
+	fcport->nvme_remote_port->private = fcport;
+	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
+	rport->fcport = fcport;
+	list_add_tail(&rport->list, &vha->nvme_rport_list);
+	return 0;
+}
+
+/* Allocate a queue for NVMe traffic */
+static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
+    unsigned int qidx, u16 qsize, void **handle)
+{
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
+	struct qla_qpair *qpair;
+
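+	/* Queue index 0 (the NVMe admin queue) is folded onto qpair 1. */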
+	if (!qidx)
+		qidx++;
+
+	vha = (struct scsi_qla_host *)lport->private;
+	ha = vha->hw;
+
+	ql_log(ql_log_info, vha, 0x2104,
+	    "%s: handle %p, idx =%d, qsize %d\n",
+	    __func__, handle, qidx, qsize);
+
+	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
+		ql_log(ql_log_warn, vha, 0x212f,
+		    "%s: Illegal qidx=%d. Max=%d\n",
+		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
+		return -EINVAL;
+	}
+
+	if (ha->queue_pair_map[qidx]) {
+		*handle = ha->queue_pair_map[qidx];
+		ql_log(ql_log_info, vha, 0x2121,
+		    "Returning existing qpair of %p for idx=%x\n",
+		    *handle, qidx);
+		return 0;
+	}
+
+	ql_log(ql_log_warn, vha, 0xffff,
+	    "allocating q for idx=%x w/o cpu mask\n", qidx);
+	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+	if (qpair == NULL) {
+		ql_log(ql_log_warn, vha, 0x2122,
+		    "Failed to allocate qpair\n");
+		return -EINVAL;
+	}
+	*handle = qpair;
+
+	return 0;
+}
+
+static void qla_nvme_sp_ls_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *nvme;
+	struct nvmefc_ls_req   *fd;
+	struct nvme_private *priv;
+
+	if (atomic_read(&sp->ref_count) == 0) {
+		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
+		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
+		return;
+	}
+
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	if (res)
+		res = -EINVAL;
+
+	nvme = &sp->u.iocb_cmd;
+	fd = nvme->u.nvme.desc;
+	priv = fd->private;
+	priv->comp_status = res;
+	schedule_work(&priv->ls_work);
+	/* work schedule doesn't need the sp */
+	qla2x00_rel_sp(sp);
+}
+
+void qla_nvme_cmpl_io(struct srb_iocb *nvme)
+{
+	srb_t *sp;
+	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+
+	sp = container_of(nvme, srb_t, u.iocb_cmd);
+	fd->done(fd);
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *nvme;
+	struct nvmefc_fcp_req *fd;
+
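+	/*
+	 * The completed request is queued on the qpair's nvme_done_list
+	 * below; qla_nvme_cmpl_io() later invokes fd->done() for it.
+	 */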
+	nvme = &sp->u.iocb_cmd;
+	fd = nvme->u.nvme.desc;
+
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
+		goto rel;
+
+	if (unlikely(res == QLA_FUNCTION_FAILED))
+		fd->status = NVME_SC_INTERNAL;
+	else
+		fd->status = 0;
+
+	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+	list_add_tail(&nvme->u.nvme.entry, &sp->qpair->nvme_done_list);
+	return;
+rel:
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+	struct nvme_private *priv = fd->private;
+	fc_port_t *fcport = rport->private;
+	srb_t *sp = priv->sp;
+	int rval;
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	rval = ha->isp_ops->abort_command(sp);
+
+	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
+	    "%s: %s LS command for sp=%p on fcport=%p rval=%x\n", __func__,
+	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, fcport, rval);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+	struct nvme_private *priv =
+	    container_of(work, struct nvme_private, ls_work);
+	struct nvmefc_ls_req *fd = priv->fd;
+
+	fd->done(fd, priv->comp_status);
+}
+
+static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+	fc_port_t *fcport = rport->private;
+	struct srb_iocb   *nvme;
+	struct nvme_private *priv = fd->private;
+	struct scsi_qla_host *vha;
+	int     rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha;
+	srb_t           *sp;
+
+	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+		return rval;
+
+	vha = fcport->vha;
+	ha = vha->hw;
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+	if (!sp)
+		return rval;
+
+	sp->type = SRB_NVME_LS;
+	sp->name = "nvme_ls";
+	sp->done = qla_nvme_sp_ls_done;
+	atomic_set(&sp->ref_count, 1);
+	nvme = &sp->u.iocb_cmd;
+	priv->sp = sp;
+	priv->fd = fd;
+	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
+	nvme->u.nvme.desc = fd;
+	nvme->u.nvme.dir = 0;
+	nvme->u.nvme.dl = 0;
+	nvme->u.nvme.cmd_len = fd->rqstlen;
+	nvme->u.nvme.rsp_len = fd->rsplen;
+	nvme->u.nvme.rsp_dma = fd->rspdma;
+	nvme->u.nvme.timeout_sec = fd->timeout;
+	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
+	    fd->rqstlen, DMA_TO_DEVICE);
+	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+	    fd->rqstlen, DMA_TO_DEVICE);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x700e,
+		    "qla2x00_start_sp failed = %d\n", rval);
+		atomic_dec(&sp->ref_count);
+		wake_up(&sp->nvme_ls_waitq);
+		return rval;
+	}
+
+	return rval;
+}
+
+static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+    struct nvmefc_fcp_req *fd)
+{
+	struct nvme_private *priv = fd->private;
+	srb_t *sp = priv->sp;
+	int rval;
+	fc_port_t *fcport = rport->private;
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	rval = ha->isp_ops->abort_command(sp);
+
+	ql_dbg(ql_dbg_io, fcport->vha, 0x2127,
+	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
+	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, fcport, rval);
+}
+
+static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
+{
+	struct scsi_qla_host *vha = lport->private;
+	unsigned long flags;
+	struct qla_qpair *qpair = hw_queue_handle;
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+	qla24xx_process_response_queue(vha, qpair->rsp);
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+}
+
+static int qla2x00_start_nvme_mq(srb_t *sp)
+{
+	unsigned long   flags;
+	uint32_t        *clr_ptr;
+	uint32_t        index;
+	uint32_t        handle;
+	struct cmd_nvme *cmd_pkt;
+	uint16_t        cnt, i;
+	uint16_t        req_cnt;
+	uint16_t        tot_dsds;
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair = sp->qpair;
+	struct srb_iocb *nvme = &sp->u.iocb_cmd;
+	struct scatterlist *sgl, *sg;
+	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+	uint32_t        rval = QLA_SUCCESS;
+
+	tot_dsds = fd->sg_cnt;
+
+	/* Acquire qpair specific lock */
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+
+	/* Setup qpair pointers */
+	req = qpair->req;
+	rsp = qpair->rsp;
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+
+	if (index == req->num_outstanding_cmds) {
+		rval = -1;
+		goto queuing_error;
+	}
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length - (req->ring_index - cnt);
+
+		if (req->cnt < (req_cnt + 2)) {
+			rval = -1;
+			goto queuing_error;
+		}
+	}
+
+	if (unlikely(!fd->sqid)) {
+		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
+			nvme->u.nvme.aen_op = 1;
+			atomic_inc(&vha->hw->nvme_active_aen_cnt);
+		}
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	/* Zero out remaining portion of packet. */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	cmd_pkt->entry_status = 0;
+
+	/* Update entry type to indicate Command NVME IOCB */
+	cmd_pkt->entry_type = COMMAND_NVME;
+
+	/* No data transfer; how do we check for buffer len == 0? */
+	if (fd->io_dir == NVMEFC_FCP_READ) {
+		cmd_pkt->control_flags =
+		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
+		vha->qla_stats.input_bytes += fd->payload_length;
+		vha->qla_stats.input_requests++;
+	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
+		cmd_pkt->control_flags =
+		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
+		vha->qla_stats.output_bytes += fd->payload_length;
+		vha->qla_stats.output_requests++;
+	} else if (fd->io_dir == 0) {
+		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+	}
+
+	/* Set NPORT-ID */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+	/* NVME RSP IU */
+	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
+	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
+	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+
+	/* NVME CNMD IU */
+	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
+	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
+	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
+
+	/* One DSD is available in the Command Type NVME IOCB */
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+	sgl = fd->first_sgl;
+
+	/* Load data segments */
+	for_each_sg(sgl, sg, tot_dsds, i) {
+		dma_addr_t      sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+
+			/* Adjust ring index */
+			req->ring_index++;
+			if (req->ring_index == req->length) {
+				req->ring_index = 0;
+				req->ring_ptr = req->ring;
+			} else {
+				req->ring_ptr++;
+			}
+			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+			*((uint32_t *)(&cont_pkt->entry_type)) =
+			    cpu_to_le32(CONTINUE_A64_TYPE);
+
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+
+	/* Set total entry count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (vha->flags.process_response_queue &&
+	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(vha, rsp);
+
+queuing_error:
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+	return rval;
+}
+
+/* Post a command */
+static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+    struct nvmefc_fcp_req *fd)
+{
+	fc_port_t *fcport;
+	struct srb_iocb *nvme;
+	struct scsi_qla_host *vha;
+	int rval = QLA_FUNCTION_FAILED;
+	srb_t *sp;
+	struct qla_qpair *qpair = hw_queue_handle;
+	struct nvme_private *priv;
+
+	if (!fd) {
+		ql_log(ql_log_warn, NULL, 0x2134, "No NVMe FCP request\n");
+		return rval;
+	}
+
+	priv = fd->private;
+	fcport = rport->private;
+	if (!fcport) {
+		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
+		return rval;
+	}
+
+	vha = fcport->vha;
+	if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
+		return -EBUSY;
+
+	/* Alloc SRB structure */
+	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+	if (!sp)
+		return -EIO;
+
+	atomic_set(&sp->ref_count, 1);
+	init_waitqueue_head(&sp->nvme_ls_waitq);
+	priv->sp = sp;
+	sp->type = SRB_NVME_CMD;
+	sp->name = "nvme_cmd";
+	sp->done = qla_nvme_sp_done;
+	sp->qpair = qpair;
+	nvme = &sp->u.iocb_cmd;
+	nvme->u.nvme.desc = fd;
+
+	rval = qla2x00_start_nvme_mq(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x212d,
+		    "qla2x00_start_nvme_mq failed = %d\n", rval);
+		atomic_dec(&sp->ref_count);
+		wake_up(&sp->nvme_ls_waitq);
+		return -EIO;
+	}
+
+	return rval;
+}
+
+static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
+{
+	struct scsi_qla_host *vha = lport->private;
+
+	ql_log(ql_log_info, vha, 0x210f,
+	    "localport delete of %p completed.\n", vha->nvme_local_port);
+	vha->nvme_local_port = NULL;
+	complete(&vha->nvme_del_done);
+}
+
+static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
+{
+	fc_port_t *fcport;
+	struct nvme_rport *r_port, *trport;
+
+	fcport = rport->private;
+	fcport->nvme_remote_port = NULL;
+	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
+
+	list_for_each_entry_safe(r_port, trport,
+	    &fcport->vha->nvme_rport_list, list) {
+		if (r_port->fcport == fcport) {
+			list_del(&r_port->list);
+			break;
+		}
+	}
+	kfree(r_port);
+	complete(&fcport->nvme_del_done);
+
+	ql_log(ql_log_info, fcport->vha, 0x2110,
+	    "remoteport_delete of %p completed.\n", fcport);
+}
+
+static struct nvme_fc_port_template qla_nvme_fc_transport = {
+	.localport_delete = qla_nvme_localport_delete,
+	.remoteport_delete = qla_nvme_remoteport_delete,
+	.create_queue   = qla_nvme_alloc_queue,
+	.delete_queue	= NULL,
+	.ls_req		= qla_nvme_ls_req,
+	.ls_abort	= qla_nvme_ls_abort,
+	.fcp_io		= qla_nvme_post_cmd,
+	.fcp_abort	= qla_nvme_fcp_abort,
+	.poll_queue	= qla_nvme_poll,
+	.max_hw_queues  = 8,
+	.max_sgl_segments = 128,
+	.max_dif_sgl_segments = 64,
+	.dma_boundary = 0xFFFFFFFF,
+	.local_priv_sz  = 8,
+	.remote_priv_sz = 0,
+	.lsrqst_priv_sz = sizeof(struct nvme_private),
+	.fcprqst_priv_sz = sizeof(struct nvme_private),
+};
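+
+/*
+ * The template above is handed to nvme_fc_register_localport() by
+ * qla_nvme_register_hba() at the bottom of this file.
+ */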
+
+#define NVME_ABORT_POLLING_PERIOD    2
+static int qla_nvme_wait_on_command(srb_t *sp)
+{
+	int ret = QLA_SUCCESS;
+
+	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
+	    NVME_ABORT_POLLING_PERIOD*HZ);
+
+	if (atomic_read(&sp->ref_count) > 1)
+		ret = QLA_FUNCTION_FAILED;
+
+	return ret;
+}
+
+static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
+{
+	int ret = QLA_SUCCESS;
+	int timeout;
+
+	timeout = wait_for_completion_timeout(&fcport->nvme_del_done,
+	    msecs_to_jiffies(2000));
+	if (!timeout) {
+		ret = QLA_FUNCTION_FAILED;
+		ql_log(ql_log_info, fcport->vha, 0x2111,
+		    "timed out waiting for fcport=%p to delete\n", fcport);
+	}
+
+	return ret;
+}
+
+void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp)
+{
+	int rval;
+
+	rval = ha->isp_ops->abort_command(sp);
+	if (!rval && !qla_nvme_wait_on_command(sp))
+		ql_log(ql_log_warn, NULL, 0x2112,
+		    "nvme_wait_on_comand timed out waiting on sp=%p\n", sp);
+}
+
+static void qla_nvme_unregister_remote_port(struct work_struct *work)
+{
+	struct fc_port *fcport = container_of(work, struct fc_port,
+	    nvme_del_work);
+	struct nvme_rport *rport, *trport;
+
+	if (!IS_ENABLED(CONFIG_NVME_FC))
+		return;
+
+	ql_log(ql_log_warn, NULL, 0x2112,
+	    "%s: unregister remoteport on %p\n",__func__, fcport);
+
+	list_for_each_entry_safe(rport, trport,
+	    &fcport->vha->nvme_rport_list, list) {
+		if (rport->fcport == fcport) {
+			ql_log(ql_log_info, fcport->vha, 0x2113,
+			    "%s: fcport=%p\n", __func__, fcport);
+			init_completion(&fcport->nvme_del_done);
+			nvme_fc_unregister_remoteport(
+			    fcport->nvme_remote_port);
+			qla_nvme_wait_on_rport_del(fcport);
+		}
+	}
+}
+
+void qla_nvme_delete(struct scsi_qla_host *vha)
+{
+	struct nvme_rport *rport, *trport;
+	fc_port_t *fcport;
+	int nv_ret;
+
+	if (!IS_ENABLED(CONFIG_NVME_FC))
+		return;
+
+	list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
+		fcport = rport->fcport;
+
+		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
+		    __func__, fcport);
+
+		init_completion(&fcport->nvme_del_done);
+		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+		qla_nvme_wait_on_rport_del(fcport);
+	}
+
+	if (vha->nvme_local_port) {
+		init_completion(&vha->nvme_del_done);
+		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
+		if (nv_ret == 0)
+			ql_log(ql_log_info, vha, 0x2116,
+			    "unregistered localport=%p\n",
+			    vha->nvme_local_port);
+		else
+			ql_log(ql_log_info, vha, 0x2115,
+			    "Unregister of localport failed\n");
+		wait_for_completion_timeout(&vha->nvme_del_done,
+		    msecs_to_jiffies(5000));
+	}
+}
+
+void qla_nvme_register_hba(struct scsi_qla_host *vha)
+{
+	struct nvme_fc_port_template *tmpl;
+	struct qla_hw_data *ha;
+	struct nvme_fc_port_info pinfo;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_NVME_FC))
+		return;
+
+	ha = vha->hw;
+	tmpl = &qla_nvme_fc_transport;
+
+	WARN_ON(vha->nvme_local_port);
+	WARN_ON(ha->max_req_queues < 3);
+
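+	/*
+	 * Cap the number of hardware queues offered to the transport;
+	 * e.g. with max_req_queues = 8 this yields min(8, 8 - 2) = 6.
+	 */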
+	qla_nvme_fc_transport.max_hw_queues =
+	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
+		(uint8_t)(ha->max_req_queues - 2));
+
+	pinfo.node_name = wwn_to_u64(vha->node_name);
+	pinfo.port_name = wwn_to_u64(vha->port_name);
+	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+	pinfo.port_id = vha->d_id.b24;
+
+	ql_log(ql_log_info, vha, 0xffff,
+	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
+	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+	ret = nvme_fc_register_localport(&pinfo, tmpl,
+	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "register_localport failed: ret=%x\n", ret);
+		return;
+	}
+	vha->nvme_local_port->private = vha;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nvme.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nvme.h
new file mode 100644
index 0000000..7f05fa1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nvme.h
@@ -0,0 +1,149 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NVME_H
+#define __QLA_NVME_H
+
+#include <linux/blk-mq.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+#include <linux/nvme-fc-driver.h>
+
+#include "qla_def.h"
+
+#define NVME_ATIO_CMD_OFF 32
+#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
+#define Q2T_NVME_NUM_TAGS 2048
+#define QLA_MAX_FC_SEGMENTS 64
+
+struct scsi_qla_host;
+struct qla_hw_data;
+struct req_que;
+struct srb;
+
+struct nvme_private {
+	struct srb	*sp;
+	struct nvmefc_ls_req *fd;
+	struct work_struct ls_work;
+	int comp_status;
+};
+
+struct nvme_rport {
+	struct nvme_fc_port_info req;
+	struct list_head list;
+	struct fc_port *fcport;
+};
+
+#define COMMAND_NVME    0x88            /* Command Type FC-NVMe IOCB */
+struct cmd_nvme {
+	uint8_t entry_type;             /* Entry type. */
+	uint8_t entry_count;            /* Entry count. */
+	uint8_t sys_define;             /* System defined. */
+	uint8_t entry_status;           /* Entry Status. */
+
+	uint32_t handle;                /* System handle. */
+	uint16_t nport_handle;          /* N_PORT handle. */
+	uint16_t timeout;               /* Command timeout. */
+
+	uint16_t dseg_count;            /* Data segment count. */
+	uint16_t nvme_rsp_dsd_len;      /* NVMe RSP DSD length */
+
+	uint64_t rsvd;
+
+	uint16_t control_flags;         /* Control Flags */
+#define CF_NVME_ENABLE                  BIT_9
+#define CF_DIF_SEG_DESCR_ENABLE         BIT_3
+#define CF_DATA_SEG_DESCR_ENABLE        BIT_2
+#define CF_READ_DATA                    BIT_1
+#define CF_WRITE_DATA                   BIT_0
+
+	uint16_t nvme_cmnd_dseg_len;             /* Data segment length. */
+	uint32_t nvme_cmnd_dseg_address[2];      /* Data segment address. */
+	uint32_t nvme_rsp_dseg_address[2];       /* Data segment address. */
+
+	uint32_t byte_count;            /* Total byte count. */
+
+	uint8_t port_id[3];             /* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint32_t nvme_data_dseg_address[2];      /* Data segment address. */
+	uint32_t nvme_data_dseg_len;             /* Data segment length. */
+};
+
+#define PT_LS4_REQUEST 0x89	/* Link Service pass-through IOCB (request) */
+struct pt_ls4_request {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_define;
+	uint8_t entry_status;
+	uint32_t handle;
+	uint16_t status;
+	uint16_t nport_handle;
+	uint16_t tx_dseg_count;
+	uint8_t  vp_index;
+	uint8_t  rsvd;
+	uint16_t timeout;
+	uint16_t control_flags;
+#define CF_LS4_SHIFT		13
+#define CF_LS4_ORIGINATOR	0
+#define CF_LS4_RESPONDER	1
+#define CF_LS4_RESPONDER_TERM	2
+
+	uint16_t rx_dseg_count;
+	uint16_t rsvd2;
+	uint32_t exchange_address;
+	uint32_t rsvd3;
+	uint32_t rx_byte_count;
+	uint32_t tx_byte_count;
+	uint32_t dseg0_address[2];
+	uint32_t dseg0_len;
+	uint32_t dseg1_address[2];
+	uint32_t dseg1_len;
+};
+
+#define PT_LS4_UNSOL 0x56	/* pass-up unsolicited rec FC-NVMe request */
+struct pt_ls4_rx_unsol {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint16_t rsvd0;
+	uint16_t rsvd1;
+	uint8_t vp_index;
+	uint8_t rsvd2;
+	uint16_t rsvd3;
+	uint16_t nport_handle;
+	uint16_t frame_size;
+	uint16_t rsvd4;
+	uint32_t exchange_address;
+	uint8_t d_id[3];
+	uint8_t r_ctl;
+	uint8_t s_id[3];
+	uint8_t cs_ctl;
+	uint8_t f_ctl[3];
+	uint8_t type;
+	uint16_t seq_cnt;
+	uint8_t df_ctl;
+	uint8_t seq_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t param;
+	uint32_t desc0;
+#define PT_LS4_PAYLOAD_OFFSET 0x2c
+#define PT_LS4_FIRST_PACKET_LEN 20
+	uint32_t desc_len;
+	uint32_t payload[3];
+};
+
+/*
+ * Global function prototypes for the qla_nvme.c source file.
+ */
+void qla_nvme_register_hba(struct scsi_qla_host *);
+int  qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
+void qla_nvme_delete(struct scsi_qla_host *);
+void qla_nvme_abort(struct qla_hw_data *, struct srb *sp);
+void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
+    struct req_que *);
+void qla24xx_async_gffid_sp_done(void *, int);
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx.c
new file mode 100644
index 0000000..a5b8313
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx.c
@@ -0,0 +1,4516 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_tcq.h>
+
+#define MASK(n)			((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
+	((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
+	((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+#define QLA82XX_PCI_MN_2M   (0)
+#define QLA82XX_PCI_MS_2M   (0x80000)
+#define QLA82XX_PCI_OCM0_2M (0xc0000)
+#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+#define BLOCK_PROTECT_BITS 0x0F
+
+/* CRB window related */
+#define CRB_BLK(off)	((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
+#define CRB_WINDOW_2M	(0x130060)
+#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
+#define CRB_HI(off)	((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
+			((off) & 0xf0000))
+#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
+#define CRB_INDIRECT_2M	(0x1e0000UL)
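+
+/*
+ * Example window decode: off = 0x2200000 gives CRB_BLK(off) = 0x22 and
+ * CRB_SUBBLK(off) = 0, selecting the corresponding crb_128M_2M_map
+ * entry for that 1 MB CRB block.
+ */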
+
+#define MAX_CRB_XFORM 60
+static unsigned long crb_addr_xform[MAX_CRB_XFORM];
+static int qla82xx_crb_table_initialized;
+
+#define qla82xx_crb_addr_transform(name) \
+	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
+	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+	0x410000A8, 0x410000AC,
+	0x410000B8, 0x410000BC
+};
+
+static void qla82xx_crb_addr_transform_setup(void)
+{
+	qla82xx_crb_addr_transform(XDMA);
+	qla82xx_crb_addr_transform(TIMR);
+	qla82xx_crb_addr_transform(SRE);
+	qla82xx_crb_addr_transform(SQN3);
+	qla82xx_crb_addr_transform(SQN2);
+	qla82xx_crb_addr_transform(SQN1);
+	qla82xx_crb_addr_transform(SQN0);
+	qla82xx_crb_addr_transform(SQS3);
+	qla82xx_crb_addr_transform(SQS2);
+	qla82xx_crb_addr_transform(SQS1);
+	qla82xx_crb_addr_transform(SQS0);
+	qla82xx_crb_addr_transform(RPMX7);
+	qla82xx_crb_addr_transform(RPMX6);
+	qla82xx_crb_addr_transform(RPMX5);
+	qla82xx_crb_addr_transform(RPMX4);
+	qla82xx_crb_addr_transform(RPMX3);
+	qla82xx_crb_addr_transform(RPMX2);
+	qla82xx_crb_addr_transform(RPMX1);
+	qla82xx_crb_addr_transform(RPMX0);
+	qla82xx_crb_addr_transform(ROMUSB);
+	qla82xx_crb_addr_transform(SN);
+	qla82xx_crb_addr_transform(QMN);
+	qla82xx_crb_addr_transform(QMS);
+	qla82xx_crb_addr_transform(PGNI);
+	qla82xx_crb_addr_transform(PGND);
+	qla82xx_crb_addr_transform(PGN3);
+	qla82xx_crb_addr_transform(PGN2);
+	qla82xx_crb_addr_transform(PGN1);
+	qla82xx_crb_addr_transform(PGN0);
+	qla82xx_crb_addr_transform(PGSI);
+	qla82xx_crb_addr_transform(PGSD);
+	qla82xx_crb_addr_transform(PGS3);
+	qla82xx_crb_addr_transform(PGS2);
+	qla82xx_crb_addr_transform(PGS1);
+	qla82xx_crb_addr_transform(PGS0);
+	qla82xx_crb_addr_transform(PS);
+	qla82xx_crb_addr_transform(PH);
+	qla82xx_crb_addr_transform(NIU);
+	qla82xx_crb_addr_transform(I2Q);
+	qla82xx_crb_addr_transform(EG);
+	qla82xx_crb_addr_transform(MN);
+	qla82xx_crb_addr_transform(MS);
+	qla82xx_crb_addr_transform(CAS2);
+	qla82xx_crb_addr_transform(CAS1);
+	qla82xx_crb_addr_transform(CAS0);
+	qla82xx_crb_addr_transform(CAM);
+	qla82xx_crb_addr_transform(C2C1);
+	qla82xx_crb_addr_transform(C2C0);
+	qla82xx_crb_addr_transform(SMB);
+	qla82xx_crb_addr_transform(OCM0);
+	/*
+	 * Used only in P3; define it for P2 as well.
+	 */
+	qla82xx_crb_addr_transform(I2C0);
+
+	qla82xx_crb_table_initialized = 1;
+}
+
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+	{{{0, 0,         0,         0} } },
+	{{{1, 0x0100000, 0x0102000, 0x120000},
+	{1, 0x0110000, 0x0120000, 0x130000},
+	{1, 0x0120000, 0x0122000, 0x124000},
+	{1, 0x0130000, 0x0132000, 0x126000},
+	{1, 0x0140000, 0x0142000, 0x128000},
+	{1, 0x0150000, 0x0152000, 0x12a000},
+	{1, 0x0160000, 0x0170000, 0x110000},
+	{1, 0x0170000, 0x0172000, 0x12e000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{1, 0x01e0000, 0x01e0800, 0x122000},
+	{0, 0x0000000, 0x0000000, 0x000000} } } ,
+	{{{1, 0x0200000, 0x0210000, 0x180000} } },
+	{{{0, 0,         0,         0} } },
+	{{{1, 0x0400000, 0x0401000, 0x169000} } },
+	{{{1, 0x0500000, 0x0510000, 0x140000} } },
+	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },
+	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },
+	{{{1, 0x0800000, 0x0802000, 0x170000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{1, 0x08f0000, 0x08f2000, 0x172000} } },
+	{{{1, 0x0900000, 0x0902000, 0x174000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{1, 0x09f0000, 0x09f2000, 0x176000} } },
+	{{{0, 0x0a00000, 0x0a02000, 0x178000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
+	{{{0, 0x0b00000, 0x0b02000, 0x17c000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
+	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
+	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
+	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },
+	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },
+	{{{1, 0x1100000, 0x1101000, 0x160000} } },
+	{{{1, 0x1200000, 0x1201000, 0x161000} } },
+	{{{1, 0x1300000, 0x1301000, 0x162000} } },
+	{{{1, 0x1400000, 0x1401000, 0x163000} } },
+	{{{1, 0x1500000, 0x1501000, 0x165000} } },
+	{{{1, 0x1600000, 0x1601000, 0x166000} } },
+	{{{0, 0,         0,         0} } },
+	{{{0, 0,         0,         0} } },
+	{{{0, 0,         0,         0} } },
+	{{{0, 0,         0,         0} } },
+	{{{0, 0,         0,         0} } },
+	{{{0, 0,         0,         0} } },
+	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },
+	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
+	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },
+	{{{0} } },
+	{{{1, 0x2100000, 0x2102000, 0x120000},
+	{1, 0x2110000, 0x2120000, 0x130000},
+	{1, 0x2120000, 0x2122000, 0x124000},
+	{1, 0x2130000, 0x2132000, 0x126000},
+	{1, 0x2140000, 0x2142000, 0x128000},
+	{1, 0x2150000, 0x2152000, 0x12a000},
+	{1, 0x2160000, 0x2170000, 0x110000},
+	{1, 0x2170000, 0x2172000, 0x12e000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000},
+	{0, 0x0000000, 0x0000000, 0x000000} } },
+	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },
+	{{{0} } },
+	{{{0} } },
+	{{{0} } },
+	{{{0} } },
+	{{{0} } },
+	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },
+	{{{1, 0x2900000, 0x2901000, 0x16b000} } },
+	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
+	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
+	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
+	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
+	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
+	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
+	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },
+	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },
+	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },
+	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },
+	{{{0} } },
+	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },
+	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },
+	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },
+	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },
+	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },
+	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
+	{{{0} } },
+	{{{0} } },
+	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
+	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },
+	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned qla82xx_crb_hub_agt[64] = {
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
+	0,
+	0,
+	0,
+	0,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
+	0,
+	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
+	0,
+};
+
+/* Device states */
+static char *q_dev_state[] = {
+	"Unknown",
+	"Cold",
+	"Initializing",
+	"Ready",
+	"Need Reset",
+	"Need Quiescent",
+	"Failed",
+	"Quiescent",
+};
+
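+/*
+ * Map an IDC device-state value to a printable name.  A minimal usage
+ * sketch (assuming dev_state was read from the device-state CRB register
+ * and lies in the 0..7 range covered by the table above; the message id
+ * is illustrative only):
+ *
+ *	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ *	ql_log(ql_log_info, vha, 0xb0ff, "Device state is 0x%x = %s.\n",
+ *	    dev_state, qdev_state(dev_state));
+ */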
+char *qdev_state(uint32_t dev_state)
+{
+	return q_dev_state[dev_state];
+}
+
+/*
+ * In: 'off_in' is offset from CRB space in 128M pci map
+ * Out: 'off_out' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
+			     void __iomem **off_out)
+{
+	u32 win_read;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	ha->crb_win = CRB_HI(off_in);
+	writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);
+
+	/* Read back value to make sure write has gone through before trying
+	 * to use it.
+	 */
+	win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+	if (win_read != ha->crb_win) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb000,
+		    "%s: Written crbwin (0x%x) "
+		    "!= Read crbwin (0x%x), off=0x%lx.\n",
+		    __func__, ha->crb_win, win_read, off_in);
+	}
+	*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+}
+
+static inline unsigned long
+qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	/* See if we are currently pointing to the region we want to use next */
+	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
+		/* No need to change window. PCIX and PCIE regs are
+		 * in both windows.
+		 */
+		return off;
+	}
+
+	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
+		/* We are in first CRB window */
+		if (ha->curr_window != 0)
+			WARN_ON(1);
+		return off;
+	}
+
+	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
+		/* We are in second CRB window */
+		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
+
+		if (ha->curr_window != 1)
+			return off;
+
+		/* We are in the QM or direct access
+		 * register region - do nothing
+		 */
+		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
+			(off < QLA82XX_PCI_CAMQM_MAX))
+			return off;
+	}
+	/* strange address given */
+	ql_dbg(ql_dbg_p3p, vha, 0xb001,
+	    "%s: Warning: unm_nic_pci_set_crbwindow "
+	    "called with an unknown address(%llx).\n",
+	    QLA2XXX_DRIVER_NAME, off);
+	return off;
+}
+
+static int
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
+			    void __iomem **off_out)
+{
+	struct crb_128M_2M_sub_block_map *m;
+
+	if (off_in >= QLA82XX_CRB_MAX)
+		return -1;
+
+	if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
+		*off_out = (off_in - QLA82XX_PCI_CAMQM) +
+		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
+		return 0;
+	}
+
+	if (off_in < QLA82XX_PCI_CRBSPACE)
+		return -1;
+
+	off_in -= QLA82XX_PCI_CRBSPACE;
+
+	/* Try direct map */
+	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
+
+	if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
+		*off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
+		return 0;
+	}
+	/* Not in direct map, use crb window */
+	*off_out = (void __iomem *)off_in;
+	return 1;
+}
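+
+/*
+ * Return convention for qla82xx_pci_get_crb_addr_2M(), as consumed by the
+ * accessors below: -1 means the offset is invalid, 0 means *off_out is a
+ * ready-to-use 2M-map address, and 1 means the caller must first take the
+ * CRB window lock and program the window via qla82xx_pci_set_crbwindow_2M().
+ */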
+
+#define CRB_WIN_LOCK_TIMEOUT 100000000
+static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
+{
+	int done = 0, timeout = 0;
+
+	while (!done) {
+		/* acquire semaphore7 from PCI HW block */
+		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
+		if (done == 1)
+			break;
+		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
+			return -1;
+		timeout++;
+	}
+	qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
+	return 0;
+}
+
+int
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
+{
+	void __iomem *off;
+	unsigned long flags = 0;
+	int rv;
+
+	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
+
+	BUG_ON(rv == -1);
+
+	if (rv == 1) {
+#ifndef __CHECKER__
+		write_lock_irqsave(&ha->hw_lock, flags);
+#endif
+		qla82xx_crb_win_lock(ha);
+		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
+	}
+
+	writel(data, (void __iomem *)off);
+
+	if (rv == 1) {
+		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
+	}
+	return 0;
+}
+
+int
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
+{
+	void __iomem *off;
+	unsigned long flags = 0;
+	int rv;
+	u32 data;
+
+	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
+
+	BUG_ON(rv == -1);
+
+	if (rv == 1) {
+#ifndef __CHECKER__
+		write_lock_irqsave(&ha->hw_lock, flags);
+#endif
+		qla82xx_crb_win_lock(ha);
+		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
+	}
+	data = RD_REG_DWORD(off);
+
+	if (rv == 1) {
+		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
+	}
+	return data;
+}
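+
+/*
+ * Typical read-modify-write use of the CRB accessors above (this exact
+ * pattern is used later to halt the SRE block during CRB init):
+ *
+ *	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+ *	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & ~0x1);
+ */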
+
+#define IDC_LOCK_TIMEOUT 100000000
+int qla82xx_idc_lock(struct qla_hw_data *ha)
+{
+	int i;
+	int done = 0, timeout = 0;
+
+	while (!done) {
+		/* acquire semaphore5 from PCI HW block */
+		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
+		if (done == 1)
+			break;
+		if (timeout >= IDC_LOCK_TIMEOUT)
+			return -1;
+
+		timeout++;
+
+		/* Yield CPU */
+		if (!in_interrupt())
+			schedule();
+		else {
+			for (i = 0; i < 20; i++)
+				cpu_relax();
+		}
+	}
+
+	return 0;
+}
+
+void qla82xx_idc_unlock(struct qla_hw_data *ha)
+{
+	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
+}
+
+/*
+ * Check memory access boundary.
+ * Used by the test agent; supports DDR access only for now.
+ */
+static unsigned long
+qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
+	unsigned long long addr, int size)
+{
+	if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
+		QLA82XX_ADDR_DDR_NET_MAX) ||
+		!addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+		QLA82XX_ADDR_DDR_NET_MAX) ||
+		((size != 1) && (size != 2) && (size != 4) && (size != 8)))
+			return 0;
+	else
+		return 1;
+}
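+
+/*
+ * Example: a 4-byte access at addr passes the check above only when both
+ * addr and addr + 3 lie inside [QLA82XX_ADDR_DDR_NET,
+ * QLA82XX_ADDR_DDR_NET_MAX] and size is one of 1, 2, 4 or 8; the function
+ * returns 1 for a legal access and 0 otherwise.
+ */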
+
+static int qla82xx_pci_set_window_warning_count;
+
+static unsigned long
+qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
+{
+	int window;
+	u32 win_read;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
+		QLA82XX_ADDR_DDR_NET_MAX)) {
+		/* DDR network side */
+		window = MN_WIN(addr);
+		ha->ddr_mn_window = window;
+		qla82xx_wr_32(ha,
+			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+		win_read = qla82xx_rd_32(ha,
+			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+		if ((win_read << 17) != window) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb003,
+			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
+			    __func__, window, win_read);
+		}
+		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
+	} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
+		QLA82XX_ADDR_OCM0_MAX)) {
+		unsigned int temp1;
+		if ((addr & 0x00ff800) == 0xff800) {
+			ql_log(ql_log_warn, vha, 0xb004,
+			    "%s: QM access not handled.\n", __func__);
+			addr = -1UL;
+		}
+		window = OCM_WIN(addr);
+		ha->ddr_mn_window = window;
+		qla82xx_wr_32(ha,
+			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
+		win_read = qla82xx_rd_32(ha,
+			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
+		temp1 = ((window & 0x1FF) << 7) |
+		    ((window & 0x0FFFE0000) >> 17);
+		if (win_read != temp1) {
+			ql_log(ql_log_warn, vha, 0xb005,
+			    "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
+			    __func__, temp1, win_read);
+		}
+		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
+
+	} else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET,
+		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
+		/* QDR network side */
+		window = MS_WIN(addr);
+		ha->qdr_sn_window = window;
+		qla82xx_wr_32(ha,
+			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
+		win_read = qla82xx_rd_32(ha,
+			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
+		if (win_read != window) {
+			ql_log(ql_log_warn, vha, 0xb006,
+			    "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
+			    __func__, window, win_read);
+		}
+		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
+	} else {
+		/*
+		 * peg gdb frequently accesses memory that doesn't exist,
+		 * this limits the chit chat so debugging isn't slowed down.
+		 */
+		if ((qla82xx_pci_set_window_warning_count++ < 8) ||
+		    (qla82xx_pci_set_window_warning_count%64 == 0)) {
+			ql_log(ql_log_warn, vha, 0xb007,
+			    "%s: Warning: %s unknown address range.\n",
+			    __func__, QLA2XXX_DRIVER_NAME);
+		}
+		addr = -1UL;
+	}
+	return addr;
+}
+
+/* check if address is in the same window as the previous access */
+static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
+	unsigned long long addr)
+{
+	int			window;
+	unsigned long long	qdr_max;
+
+	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
+
+	/* DDR network side */
+	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
+		QLA82XX_ADDR_DDR_NET_MAX))
+		BUG();
+	else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
+		QLA82XX_ADDR_OCM0_MAX))
+		return 1;
+	else if (addr_in_range(addr, QLA82XX_ADDR_OCM1,
+		QLA82XX_ADDR_OCM1_MAX))
+		return 1;
+	else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+		/* QDR network side */
+		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
+		if (ha->qdr_sn_window == window)
+			return 1;
+	}
+	return 0;
+}
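+
+/*
+ * Note: a DDR-net address reaching qla82xx_pci_is_same_window() is treated
+ * as a driver bug (BUG()), apparently because fully in-bounds DDR accesses
+ * are expected to be filtered out by the bound check before the direct
+ * access helpers are used.
+ */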
+
+static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
+	u64 off, void *data, int size)
+{
+	unsigned long   flags;
+	void __iomem *addr = NULL;
+	int             ret = 0;
+	u64             start;
+	uint8_t __iomem  *mem_ptr = NULL;
+	unsigned long   mem_base;
+	unsigned long   mem_page;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/*
+	 * If attempting to access unknown address or straddle hw windows,
+	 * do not access.
+	 */
+	start = qla82xx_pci_set_window(ha, off);
+	if ((start == -1UL) ||
+		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+		ql_log(ql_log_fatal, vha, 0xb008,
+		    "%s out of bounds PCI memory "
+		    "access, offset is 0x%llx.\n",
+		    QLA2XXX_DRIVER_NAME, off);
+		return -1;
+	}
+
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+	mem_base = pci_resource_start(ha->pdev, 0);
+	mem_page = start & PAGE_MASK;
+	/* Map two pages whenever user tries to access addresses in two
+	 * consecutive pages.
+	 */
+	if (mem_page != ((start + size - 1) & PAGE_MASK))
+		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+	else
+		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+	if (mem_ptr == NULL) {
+		*(u8  *)data = 0;
+		return -1;
+	}
+	addr = mem_ptr;
+	addr += start & (PAGE_SIZE - 1);
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	switch (size) {
+	case 1:
+		*(u8  *)data = readb(addr);
+		break;
+	case 2:
+		*(u16 *)data = readw(addr);
+		break;
+	case 4:
+		*(u32 *)data = readl(addr);
+		break;
+	case 8:
+		*(u64 *)data = readq(addr);
+		break;
+	default:
+		ret = -1;
+		break;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return ret;
+}
+
+static int
+qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
+	u64 off, void *data, int size)
+{
+	unsigned long   flags;
+	void  __iomem *addr = NULL;
+	int             ret = 0;
+	u64             start;
+	uint8_t __iomem *mem_ptr = NULL;
+	unsigned long   mem_base;
+	unsigned long   mem_page;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/*
+	 * If attempting to access unknown address or straddle hw windows,
+	 * do not access.
+	 */
+	start = qla82xx_pci_set_window(ha, off);
+	if ((start == -1UL) ||
+		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
+		write_unlock_irqrestore(&ha->hw_lock, flags);
+		ql_log(ql_log_fatal, vha, 0xb009,
+		    "%s out of bounds memory "
+		    "access, offset is 0x%llx.\n",
+		    QLA2XXX_DRIVER_NAME, off);
+		return -1;
+	}
+
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+	mem_base = pci_resource_start(ha->pdev, 0);
+	mem_page = start & PAGE_MASK;
+	/* Map two pages whenever user tries to access addresses in two
+	 * consecutive pages.
+	 */
+	if (mem_page != ((start + size - 1) & PAGE_MASK))
+		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
+	else
+		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+	if (mem_ptr == NULL)
+		return -1;
+
+	addr = mem_ptr;
+	addr += start & (PAGE_SIZE - 1);
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	switch (size) {
+	case 1:
+		writeb(*(u8  *)data, addr);
+		break;
+	case 2:
+		writew(*(u16 *)data, addr);
+		break;
+	case 4:
+		writel(*(u32 *)data, addr);
+		break;
+	case 8:
+		writeq(*(u64 *)data, addr);
+		break;
+	default:
+		ret = -1;
+		break;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+	if (mem_ptr)
+		iounmap(mem_ptr);
+	return ret;
+}
+
+#define MTU_FUDGE_FACTOR 100
+static unsigned long
+qla82xx_decode_crb_addr(unsigned long addr)
+{
+	int i;
+	unsigned long base_addr, offset, pci_base;
+
+	if (!qla82xx_crb_table_initialized)
+		qla82xx_crb_addr_transform_setup();
+
+	pci_base = ADDR_ERROR;
+	base_addr = addr & 0xfff00000;
+	offset = addr & 0x000fffff;
+
+	for (i = 0; i < MAX_CRB_XFORM; i++) {
+		if (crb_addr_xform[i] == base_addr) {
+			pci_base = i << 20;
+			break;
+		}
+	}
+	if (pci_base == ADDR_ERROR)
+		return pci_base;
+	return pci_base + offset;
+}
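+
+/*
+ * Worked example for qla82xx_decode_crb_addr(): the address splits into a
+ * 1M-aligned base (addr & 0xfff00000) and an offset (addr & 0x000fffff).
+ * If crb_addr_xform[i] equals the base, the PCI address is
+ * (i << 20) + offset, i.e. CRB block i of the 128M map plus the offset;
+ * otherwise ADDR_ERROR is returned.
+ */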
+
+static long rom_max_timeout = 100;
+static long qla82xx_rom_lock_timeout = 100;
+
+static int
+qla82xx_rom_lock(struct qla_hw_data *ha)
+{
+	int done = 0, timeout = 0;
+	uint32_t lock_owner = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while (!done) {
+		/* acquire semaphore2 from PCI HW block */
+		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
+		if (done == 1)
+			break;
+		if (timeout >= qla82xx_rom_lock_timeout) {
+			lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+			ql_dbg(ql_dbg_p3p, vha, 0xb157,
+			    "%s: Simultaneous flash access: requesting port = %d, lock owned by port = %d.\n",
+			    __func__, ha->portnum, lock_owner);
+			return -1;
+		}
+		timeout++;
+	}
+	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
+	return 0;
+}
+
+static void
+qla82xx_rom_unlock(struct qla_hw_data *ha)
+{
+	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
+	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
+}
+
+static int
+qla82xx_wait_rom_busy(struct qla_hw_data *ha)
+{
+	long timeout = 0;
+	long done = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while (done == 0) {
+		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+		done &= 4;
+		timeout++;
+		if (timeout >= rom_max_timeout) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb00a,
+			    "%s: Timeout reached waiting for rom busy.\n",
+			    QLA2XXX_DRIVER_NAME);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
+qla82xx_wait_rom_done(struct qla_hw_data *ha)
+{
+	long timeout = 0;
+	long done = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while (done == 0) {
+		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
+		done &= 2;
+		timeout++;
+		if (timeout >= rom_max_timeout) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb00b,
+			    "%s: Timeout reached waiting for rom done.\n",
+			    QLA2XXX_DRIVER_NAME);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
+qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
+{
+	uint32_t  off_value, rval = 0;
+
+	WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
+
+	/* Read back value to make sure write has gone through */
+	RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+	off_value  = (off & 0x0000FFFF);
+
+	if (flag)
+		WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+			      data);
+	else
+		rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+				    ha->nx_pcibase);
+
+	return rval;
+}
+
+static int
+qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+	/* Dword reads to flash. */
+	qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
+	*valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
+	    (addr & 0x0000FFFF), 0, 0);
+
+	return 0;
+}
+
+static int
+qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
+{
+	int ret, loops = 0;
+	uint32_t lock_owner = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+		udelay(100);
+		schedule();
+		loops++;
+	}
+	if (loops >= 50000) {
+		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+		ql_log(ql_log_fatal, vha, 0x00b9,
+		    "Failed to acquire SEM2 lock, Lock Owner %u.\n",
+		    lock_owner);
+		return -1;
+	}
+	ret = qla82xx_do_rom_fast_read(ha, addr, valp);
+	qla82xx_rom_unlock(ha);
+	return ret;
+}
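+
+/*
+ * Flash reads are serialized by the SEM2 hardware lock.  A sketch of the
+ * expected call pattern (dword offsets, as used by the CRB init code
+ * below):
+ *
+ *	int val;
+ *	if (qla82xx_rom_fast_read(ha, 0, &val) == 0 && val == 0xcafecafe)
+ *		...	flash carries a valid CRB init signature
+ */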
+
+static int
+qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
+	qla82xx_wait_rom_busy(ha);
+	if (qla82xx_wait_rom_done(ha)) {
+		ql_log(ql_log_warn, vha, 0xb00c,
+		    "Error waiting for rom done.\n");
+		return -1;
+	}
+	*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
+	return 0;
+}
+
+static int
+qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
+{
+	long timeout = 0;
+	uint32_t done = 1;
+	uint32_t val;
+	int ret = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+	while ((done != 0) && (ret == 0)) {
+		ret = qla82xx_read_status_reg(ha, &val);
+		done = val & 1;
+		timeout++;
+		udelay(10);
+		cond_resched();
+		if (timeout >= 50000) {
+			ql_log(ql_log_warn, vha, 0xb00d,
+			    "Timeout reached waiting for write finish.\n");
+			return -1;
+		}
+	}
+	return ret;
+}
+
+static int
+qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
+{
+	uint32_t val;
+	qla82xx_wait_rom_busy(ha);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
+	qla82xx_wait_rom_busy(ha);
+	if (qla82xx_wait_rom_done(ha))
+		return -1;
+	if (qla82xx_read_status_reg(ha, &val) != 0)
+		return -1;
+	if ((val & 2) != 2)
+		return -1;
+	return 0;
+}
+
+static int
+qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	if (qla82xx_flash_set_write_enable(ha))
+		return -1;
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
+	if (qla82xx_wait_rom_done(ha)) {
+		ql_log(ql_log_warn, vha, 0xb00e,
+		    "Error waiting for rom done.\n");
+		return -1;
+	}
+	return qla82xx_flash_wait_write_finish(ha);
+}
+
+static int
+qla82xx_write_disable_flash(struct qla_hw_data *ha)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
+	if (qla82xx_wait_rom_done(ha)) {
+		ql_log(ql_log_warn, vha, 0xb00f,
+		    "Error waiting for rom done.\n");
+		return -1;
+	}
+	return 0;
+}
+
+static int
+ql82xx_rom_lock_d(struct qla_hw_data *ha)
+{
+	int loops = 0;
+	uint32_t lock_owner = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
+		udelay(100);
+		cond_resched();
+		loops++;
+	}
+	if (loops >= 50000) {
+		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+		ql_log(ql_log_warn, vha, 0xb010,
+		    "ROM lock failed, Lock Owner %u.\n", lock_owner);
+		return -1;
+	}
+	return 0;
+}
+
+static int
+qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
+	uint32_t data)
+{
+	int ret = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	ret = ql82xx_rom_lock_d(ha);
+	if (ret < 0) {
+		ql_log(ql_log_warn, vha, 0xb011,
+		    "ROM lock failed.\n");
+		return ret;
+	}
+
+	if (qla82xx_flash_set_write_enable(ha))
+		goto done_write;
+
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
+	qla82xx_wait_rom_busy(ha);
+	if (qla82xx_wait_rom_done(ha)) {
+		ql_log(ql_log_warn, vha, 0xb012,
+		    "Error waiting for rom done.\n");
+		ret = -1;
+		goto done_write;
+	}
+
+	ret = qla82xx_flash_wait_write_finish(ha);
+
+done_write:
+	qla82xx_rom_unlock(ha);
+	return ret;
+}
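+
+/*
+ * qla82xx_write_flash_dword() above follows the usual serial-flash
+ * programming sequence: take the ROM lock, issue write-enable
+ * (M25P_INSTR_WREN), load the data and address registers, issue page
+ * program (M25P_INSTR_PP), then poll the status register until the write
+ * finishes before unlocking.
+ */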
+
+/* This routine performs the CRB initialization sequence
+ * to put the ISP into an operational state.
+ */
+static int
+qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
+{
+	int addr, val;
+	int i;
+	struct crb_addr_pair *buf;
+	unsigned long off;
+	unsigned offset, n;
+	struct qla_hw_data *ha = vha->hw;
+
+	struct crb_addr_pair {
+		long addr;
+		long data;
+	};
+
+	/* Halt all the individual PEGs and other blocks of the ISP */
+	qla82xx_rom_lock(ha);
+
+	/* disable all I2Q */
+	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
+
+	/* disable all niu interrupts */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+	/* disable xge rx/tx */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+	/* disable xg1 rx/tx */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+	/* disable sideband mac */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
+	/* disable ap0 mac */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
+	/* disable ap1 mac */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
+
+	/* halt sre */
+	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+
+	/* halt epg */
+	qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+
+	/* halt timers */
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
+
+	/* halt pegs */
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+	msleep(20);
+
+	/* big hammer */
+	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+		/* don't reset CAM block on reset */
+		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+	else
+		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+	qla82xx_rom_unlock(ha);
+
+	/* Read the signature value from the flash.
+	 * Offset 0: Contains the signature (0xcafecafe)
+	 * Offset 4: Offset and number of addr/value pairs
+	 * present in the CRB initialization sequence
+	 */
+	if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
+	    qla82xx_rom_fast_read(ha, 4, &n) != 0) {
+		ql_log(ql_log_fatal, vha, 0x006e,
+		    "Error Reading crb_init area: n: %08x.\n", n);
+		return -1;
+	}
+
+	/* Offset in flash = lower 16 bits
+	 * Number of entries = upper 16 bits
+	 */
+	offset = n & 0xffffU;
+	n = (n >> 16) & 0xffffU;
+
+	/* number of addr/value pairs should not exceed 1024 entries */
+	if (n  >= 1024) {
+		ql_log(ql_log_fatal, vha, 0x0071,
+		    "Card flash not initialized: n=0x%x.\n", n);
+		return -1;
+	}
+
+	ql_log(ql_log_info, vha, 0x0072,
+	    "%d CRB init values found in ROM.\n", n);
+
+	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
+	if (buf == NULL) {
+		ql_log(ql_log_fatal, vha, 0x010c,
+		    "Unable to allocate memory.\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < n; i++) {
+		if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
+		    qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
+			kfree(buf);
+			return -1;
+		}
+
+		buf[i].addr = addr;
+		buf[i].data = val;
+	}
+
+	for (i = 0; i < n; i++) {
+		/* Translate internal CRB initialization
+		 * address to PCI bus address
+		 */
+		off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
+		    QLA82XX_PCI_CRBSPACE;
+		/* Not all CRB addr/value pairs are written;
+		 * some of them are skipped.
+		 */
+
+		/* skipping cold reboot MAGIC */
+		if (off == QLA82XX_CAM_RAM(0x1fc))
+			continue;
+
+		/* do not reset PCI */
+		if (off == (ROMUSB_GLB + 0xbc))
+			continue;
+
+		/* skip core clock, so that firmware can increase the clock */
+		if (off == (ROMUSB_GLB + 0xc8))
+			continue;
+
+		/* skip the function enable register */
+		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
+			continue;
+
+		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
+			continue;
+
+		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
+			continue;
+
+		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
+			continue;
+
+		if (off == ADDR_ERROR) {
+			ql_log(ql_log_fatal, vha, 0x0116,
+			    "Unknown addr: 0x%08lx.\n", buf[i].addr);
+			continue;
+		}
+
+		qla82xx_wr_32(ha, off, buf[i].data);
+
+		/* The ISP requires a much longer delay to settle down;
+		 * otherwise crb_window returns 0xffffffff.
+		 */
+		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
+			msleep(1000);
+
+		/* The ISP requires a millisecond delay between
+		 * successive CRB register updates.
+		 */
+		msleep(1);
+	}
+
+	kfree(buf);
+
+	/* Resetting the data and instruction cache */
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
+
+	/* Clear all protocol processing engines */
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
+	return 0;
+}
+
+static int
+qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
+		u64 off, void *data, int size)
+{
+	int i, j, ret = 0, loop, sz[2], off0;
+	int scale, shift_amount, startword;
+	uint32_t temp;
+	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};
+
+	/*
+	 * If not MN, go check for MS or invalid.
+	 */
+	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+		mem_crb = QLA82XX_CRB_QDR_NET;
+	else {
+		mem_crb = QLA82XX_CRB_DDR_NET;
+		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+			return qla82xx_pci_mem_write_direct(ha,
+			    off, data, size);
+	}
+
+	off0 = off & 0x7;
+	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
+	sz[1] = size - sz[0];
+
+	off8 = off & 0xfffffff0;
+	loop = (((off & 0xf) + size - 1) >> 4) + 1;
+	shift_amount = 4;
+	scale = 2;
+	startword = (off & 0xf)/8;
+
+	for (i = 0; i < loop; i++) {
+		if (qla82xx_pci_mem_read_2M(ha, off8 +
+		    (i << shift_amount), &word[i * scale], 8))
+			return -1;
+	}
+
+	switch (size) {
+	case 1:
+		tmpw = *((uint8_t *)data);
+		break;
+	case 2:
+		tmpw = *((uint16_t *)data);
+		break;
+	case 4:
+		tmpw = *((uint32_t *)data);
+		break;
+	case 8:
+	default:
+		tmpw = *((uint64_t *)data);
+		break;
+	}
+
+	if (sz[0] == 8) {
+		word[startword] = tmpw;
+	} else {
+		word[startword] &=
+			~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
+		word[startword] |= tmpw << (off0 * 8);
+	}
+	if (sz[1] != 0) {
+		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
+		word[startword+1] |= tmpw >> (sz[0] * 8);
+	}
+
+	for (i = 0; i < loop; i++) {
+		temp = off8 + (i << shift_amount);
+		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
+		temp = 0;
+		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
+		temp = word[i * scale] & 0xffffffff;
+		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
+		temp = (word[i * scale] >> 32) & 0xffffffff;
+		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
+		temp = word[i*scale + 1] & 0xffffffff;
+		qla82xx_wr_32(ha, mem_crb +
+		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
+		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
+		qla82xx_wr_32(ha, mem_crb +
+		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);
+
+		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
+		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+			if ((temp & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			if (printk_ratelimit())
+				dev_err(&ha->pdev->dev,
+				    "failed to write through agent.\n");
+			ret = -1;
+			break;
+		}
+	}
+
+	return ret;
+}
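+
+/*
+ * The MIU test agent transfers aligned lines, so sub-line writes above are
+ * done as read-modify-write: the affected data is first read back via
+ * qla82xx_pci_mem_read_2M(), the new bytes are merged in at the byte
+ * offset, and the result is pushed out through the WRDATA/CTRL agent
+ * registers.
+ */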
+
+static int
+qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
+{
+	int  i;
+	long size = 0;
+	long flashaddr = ha->flt_region_bootload << 2;
+	long memaddr = BOOTLD_START;
+	u64 data;
+	u32 high, low;
+	size = (IMAGE_START - BOOTLD_START) / 8;
+
+	for (i = 0; i < size; i++) {
+		if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
+		    (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
+			return -1;
+		}
+		data = ((u64)high << 32) | low;
+		qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
+		flashaddr += 8;
+		memaddr += 8;
+
+		if (i % 0x1000 == 0)
+			msleep(1);
+	}
+	udelay(100);
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+	read_unlock(&ha->hw_lock);
+	return 0;
+}
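+
+/*
+ * The final pair of writes above (PEG_NET_0 + 0x18 = 0x1020 and
+ * ROMUSB_GLB_SW_RESET = 0x80001e) presumably kicks off execution of the
+ * just-loaded boot loader; the same sequence is repeated at the end of
+ * qla82xx_fw_load_from_blob() below.
+ */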
+
+int
+qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
+		u64 off, void *data, int size)
+{
+	int i, j = 0, k, start, end, loop, sz[2], off0[2];
+	int	      shift_amount;
+	uint32_t      temp;
+	uint64_t      off8, val, mem_crb, word[2] = {0, 0};
+
+	/*
+	 * If not MN, go check for MS or invalid.
+	 */
+
+	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
+		mem_crb = QLA82XX_CRB_QDR_NET;
+	else {
+		mem_crb = QLA82XX_CRB_DDR_NET;
+		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
+			return qla82xx_pci_mem_read_direct(ha,
+			    off, data, size);
+	}
+
+	off8 = off & 0xfffffff0;
+	off0[0] = off & 0xf;
+	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
+	shift_amount = 4;
+	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
+	off0[1] = 0;
+	sz[1] = size - sz[0];
+
+	for (i = 0; i < loop; i++) {
+		temp = off8 + (i << shift_amount);
+		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
+		temp = 0;
+		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
+		temp = MIU_TA_CTL_ENABLE;
+		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
+			if ((temp & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			if (printk_ratelimit())
+				dev_err(&ha->pdev->dev,
+				    "failed to read through agent.\n");
+			break;
+		}
+
+		start = off0[i] >> 2;
+		end   = (off0[i] + sz[i] - 1) >> 2;
+		for (k = start; k <= end; k++) {
+			temp = qla82xx_rd_32(ha,
+					mem_crb + MIU_TEST_AGT_RDDATA(k));
+			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
+		}
+	}
+
+	if (j >= MAX_CTL_CHECK)
+		return -1;
+
+	if ((off0[0] & 7) == 0) {
+		val = word[0];
+	} else {
+		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
+			((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
+	}
+
+	switch (size) {
+	case 1:
+		*(uint8_t  *)data = val;
+		break;
+	case 2:
+		*(uint16_t *)data = val;
+		break;
+	case 4:
+		*(uint32_t *)data = val;
+		break;
+	case 8:
+		*(uint64_t *)data = val;
+		break;
+	}
+	return 0;
+}
+
+static struct qla82xx_uri_table_desc *
+qla82xx_get_table_desc(const u8 *unirom, int section)
+{
+	uint32_t i;
+	struct qla82xx_uri_table_desc *directory =
+		(struct qla82xx_uri_table_desc *)&unirom[0];
+	__le32 offset;
+	__le32 tab_type;
+	__le32 entries = cpu_to_le32(directory->num_entries);
+
+	for (i = 0; i < entries; i++) {
+		offset = cpu_to_le32(directory->findex) +
+		    (i * cpu_to_le32(directory->entry_size));
+		tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
+
+		if (tab_type == section)
+			return (struct qla82xx_uri_table_desc *)&unirom[offset];
+	}
+
+	return NULL;
+}
+
+static struct qla82xx_uri_data_desc *
+qla82xx_get_data_desc(struct qla_hw_data *ha,
+	u32 section, u32 idx_offset)
+{
+	const u8 *unirom = ha->hablob->fw->data;
+	int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
+	struct qla82xx_uri_table_desc *tab_desc = NULL;
+	__le32 offset;
+
+	tab_desc = qla82xx_get_table_desc(unirom, section);
+	if (!tab_desc)
+		return NULL;
+
+	offset = cpu_to_le32(tab_desc->findex) +
+	    (cpu_to_le32(tab_desc->entry_size) * idx);
+
+	return (struct qla82xx_uri_data_desc *)&unirom[offset];
+}
+
+static u8 *
+qla82xx_get_bootld_offset(struct qla_hw_data *ha)
+{
+	u32 offset = BOOTLD_START;
+	struct qla82xx_uri_data_desc *uri_desc = NULL;
+
+	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+		uri_desc = qla82xx_get_data_desc(ha,
+		    QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
+		if (uri_desc)
+			offset = cpu_to_le32(uri_desc->findex);
+	}
+
+	return (u8 *)&ha->hablob->fw->data[offset];
+}
+
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
+{
+	struct qla82xx_uri_data_desc *uri_desc = NULL;
+
+	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
+		    QLA82XX_URI_FIRMWARE_IDX_OFF);
+		if (uri_desc)
+			return cpu_to_le32(uri_desc->size);
+	}
+
+	return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+}
+
+static u8 *
+qla82xx_get_fw_offs(struct qla_hw_data *ha)
+{
+	u32 offset = IMAGE_START;
+	struct qla82xx_uri_data_desc *uri_desc = NULL;
+
+	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
+			QLA82XX_URI_FIRMWARE_IDX_OFF);
+		if (uri_desc)
+			offset = cpu_to_le32(uri_desc->findex);
+	}
+
+	return (u8 *)&ha->hablob->fw->data[offset];
+}
+
+/* PCI related functions */
+int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
+{
+	unsigned long val = 0;
+	u32 control;
+
+	switch (region) {
+	case 0:
+		val = 0;
+		break;
+	case 1:
+		pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
+		val = control + QLA82XX_MSIX_TBL_SPACE;
+		break;
+	}
+	return val;
+}
+
+int
+qla82xx_iospace_config(struct qla_hw_data *ha)
+{
+	uint32_t len = 0;
+
+	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
+		    "Failed to reserve selected regions.\n");
+		goto iospace_error_exit;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
+		    "Region #0 not an MMIO resource, aborting.\n");
+		goto iospace_error_exit;
+	}
+
+	len = pci_resource_len(ha->pdev, 0);
+	ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len);
+	if (!ha->nx_pcibase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
+		    "Cannot remap pcibase MMIO, aborting.\n");
+		goto iospace_error_exit;
+	}
+
+	/* Mapping of IO base pointer */
+	if (IS_QLA8044(ha)) {
+		ha->iobase = ha->nx_pcibase;
+	} else if (IS_QLA82XX(ha)) {
+		ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11);
+	}
+
+	if (!ql2xdbwr) {
+		ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) +
+		    (ha->pdev->devfn << 12)), 4);
+		if (!ha->nxdb_wr_ptr) {
+			ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
+			    "Cannot remap MMIO, aborting.\n");
+			goto iospace_error_exit;
+		}
+
+		/* Mapping of IO base pointer,
+		 * door bell read and write pointer
+		 */
+		ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) +
+		    (ha->pdev->devfn * 8);
+	} else {
+		ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ?
+			QLA82XX_CAMRAM_DB1 :
+			QLA82XX_CAMRAM_DB2);
+	}
+
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+	ha->msix_count = ha->max_rsp_queues + 1;
+	ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
+	    "nx_pci_base=%p iobase=%p "
+	    "max_req_queues=%d msix_count=%d.\n",
+	    ha->nx_pcibase, ha->iobase,
+	    ha->max_req_queues, ha->msix_count);
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
+	    "nx_pci_base=%p iobase=%p "
+	    "max_req_queues=%d msix_count=%d.\n",
+	    ha->nx_pcibase, ha->iobase,
+	    ha->max_req_queues, ha->msix_count);
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
+/* GS related functions */
+
+/* Initialization related functions */
+
+/**
+ * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla82xx_pci_config(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int ret;
+
+	pci_set_master(ha->pdev);
+	ret = pci_set_mwi(ha->pdev);
+	ha->chip_revision = ha->pdev->revision;
+	ql_dbg(ql_dbg_init, vha, 0x0043,
+	    "Chip revision:%d; pci_set_mwi() returned %d.\n",
+	    ha->chip_revision, ret);
+	return 0;
+}
+
+/**
+ * qla82xx_reset_chip() - Disable interrupts in preparation for chip reset.
+ * @vha: HA context
+ */
+void
+qla82xx_reset_chip(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	ha->isp_ops->disable_intrs(ha);
+}
+
+void qla82xx_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+	struct init_cb_81xx *icb;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	/* Setup ring parameters in initialization control block. */
+	icb = (struct init_cb_81xx *)ha->init_cb;
+	icb->request_q_outpointer = cpu_to_le16(0);
+	icb->response_q_inpointer = cpu_to_le16(0);
+	icb->request_q_length = cpu_to_le16(req->length);
+	icb->response_q_length = cpu_to_le16(rsp->length);
+	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+	WRT_REG_DWORD(&reg->req_q_out[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
+}
+
+static int
+qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
+{
+	u64 *ptr64;
+	u32 i, flashaddr, size;
+	__le64 data;
+
+	size = (IMAGE_START - BOOTLD_START) / 8;
+
+	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
+	flashaddr = BOOTLD_START;
+
+	for (i = 0; i < size; i++) {
+		data = cpu_to_le64(ptr64[i]);
+		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+			return -EIO;
+		flashaddr += 8;
+	}
+
+	flashaddr = FLASH_ADDR_START;
+	size = qla82xx_get_fw_size(ha) / 8;
+	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
+
+	for (i = 0; i < size; i++) {
+		data = cpu_to_le64(ptr64[i]);
+
+		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
+			return -EIO;
+		flashaddr += 8;
+	}
+	udelay(100);
+
+	/* Write a magic value to CAMRAM register
+	 * at a specified offset to indicate
+	 * that all data is written and
+	 * ready for firmware to initialize.
+	 */
+	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);
+
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
+	read_unlock(&ha->hw_lock);
+	return 0;
+}
+
+static int
+qla82xx_set_product_offset(struct qla_hw_data *ha)
+{
+	struct qla82xx_uri_table_desc *ptab_desc = NULL;
+	const uint8_t *unirom = ha->hablob->fw->data;
+	uint32_t i;
+	__le32 entries;
+	__le32 flags, file_chiprev, offset;
+	uint8_t chiprev = ha->chip_revision;
+	/* Hardcoding mn_present flag for P3P */
+	int mn_present = 0;
+	uint32_t flagbit;
+
+	ptab_desc = qla82xx_get_table_desc(unirom,
+		 QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
+	if (!ptab_desc)
+		return -1;
+
+	entries = cpu_to_le32(ptab_desc->num_entries);
+
+	for (i = 0; i < entries; i++) {
+		offset = cpu_to_le32(ptab_desc->findex) +
+			(i * cpu_to_le32(ptab_desc->entry_size));
+		flags = cpu_to_le32(*((int *)&unirom[offset] +
+			QLA82XX_URI_FLAGS_OFF));
+		file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
+			QLA82XX_URI_CHIP_REV_OFF));
+
+		flagbit = mn_present ? 1 : 2;
+
+		if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
+			ha->file_prd_off = offset;
+			return 0;
+		}
+	}
+	return -1;
+}
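+
+/*
+ * Product-table matching above: an entry is selected when its chip
+ * revision equals ha->chip_revision and the relevant flag bit is set;
+ * with mn_present hardcoded to 0 for P3P, flagbit is 2, so bit 2
+ * (value 4) of the entry's flags must be set.
+ */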
+
+static int
+qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
+{
+	__le32 val;
+	uint32_t min_size;
+	struct qla_hw_data *ha = vha->hw;
+	const struct firmware *fw = ha->hablob->fw;
+
+	ha->fw_type = fw_type;
+
+	if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
+		if (qla82xx_set_product_offset(ha))
+			return -EINVAL;
+
+		min_size = QLA82XX_URI_FW_MIN_SIZE;
+	} else {
+		val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+		if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
+			return -EINVAL;
+
+		min_size = QLA82XX_FW_MIN_SIZE;
+	}
+
+	if (fw->size < min_size)
+		return -EINVAL;
+	return 0;
+}
+
+static int
+qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
+{
+	u32 val = 0;
+	int retries = 60;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	do {
+		read_lock(&ha->hw_lock);
+		val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
+		read_unlock(&ha->hw_lock);
+
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return QLA_SUCCESS;
+		case PHAN_INITIALIZE_FAILED:
+			break;
+		default:
+			break;
+		}
+		ql_log(ql_log_info, vha, 0x00a8,
+		    "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x.\n",
+		    val, retries);
+
+		msleep(500);
+
+	} while (--retries);
+
+	ql_log(ql_log_fatal, vha, 0x00a9,
+	    "Cmd Peg initialization failed: 0x%x.\n", val);
+
+	val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+	read_unlock(&ha->hw_lock);
+	return QLA_FUNCTION_FAILED;
+}
+
+static int
+qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
+{
+	u32 val = 0;
+	int retries = 60;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	do {
+		read_lock(&ha->hw_lock);
+		val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
+		read_unlock(&ha->hw_lock);
+
+		switch (val) {
+		case PHAN_INITIALIZE_COMPLETE:
+		case PHAN_INITIALIZE_ACK:
+			return QLA_SUCCESS;
+		case PHAN_INITIALIZE_FAILED:
+			break;
+		default:
+			break;
+		}
+		ql_log(ql_log_info, vha, 0x00ab,
+		    "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
+		    val, retries);
+
+		msleep(500);
+
+	} while (--retries);
+
+	ql_log(ql_log_fatal, vha, 0x00ac,
+	    "Rcv Peg initialization failed: 0x%x.\n", val);
+	read_lock(&ha->hw_lock);
+	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
+	read_unlock(&ha->hw_lock);
+	return QLA_FUNCTION_FAILED;
+}
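+
+/*
+ * Both peg-state checks above poll their CRB handshake register every
+ * 500 ms for up to 60 iterations (roughly 30 seconds), accepting either
+ * PHAN_INITIALIZE_COMPLETE or PHAN_INITIALIZE_ACK as success and writing
+ * PHAN_INITIALIZE_FAILED back on timeout.
+ */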
+
+/* ISR related functions */
+static struct qla82xx_legacy_intr_set legacy_intr[] = \
+	QLA82XX_LEGACY_INTR_CONFIG;
+
+/*
+ * qla82xx_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+void
+qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
+{
+	uint16_t	cnt;
+	uint16_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
+	wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+
+	/* Load return mailbox registers. */
+	ha->flags.mbox_int = 1;
+	ha->mailbox_out[0] = mb0;
+
+	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+		wptr++;
+	}
+
+	if (!ha->mcp)
+		ql_dbg(ql_dbg_async, vha, 0x5053,
+		    "MBX pointer ERROR.\n");
+}
+
+/*
+ * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context (response queue)
+ *
+ * Called by the system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla82xx_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int status = 0, status1 = 0;
+	unsigned long	flags;
+	unsigned long	iter;
+	uint32_t	stat = 0;
+	uint16_t	mb[4];
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0xb053,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+
+	if (!ha->flags.msi_enabled) {
+		status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
+		if (!(status & ha->nx_legacy_intr.int_vec_bit))
+			return IRQ_NONE;
+
+		status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
+		if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
+			return IRQ_NONE;
+	}
+
+	/* clear the interrupt */
+	qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
+
+	/* read twice to ensure write is flushed */
+	qla82xx_rd_32(ha, ISR_INT_VECTOR);
+	qla82xx_rd_32(ha, ISR_INT_VECTOR);
+
+	reg = &ha->iobase->isp82;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	for (iter = 1; iter--; ) {
+
+		if (RD_REG_DWORD(&reg->host_int)) {
+			stat = RD_REG_DWORD(&reg->host_status);
+
+			switch (stat & 0xff) {
+			case 0x1:
+			case 0x2:
+			case 0x10:
+			case 0x11:
+				qla82xx_mbx_completion(vha, MSW(stat));
+				status |= MBX_INTERRUPT;
+				break;
+			case 0x12:
+				mb[0] = MSW(stat);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			case 0x13:
+				qla24xx_process_response_queue(vha, rsp);
+				break;
+			default:
+				ql_dbg(ql_dbg_async, vha, 0x5054,
+				    "Unrecognized interrupt type (%d).\n",
+				    stat & 0xff);
+				break;
+			}
+		}
+		WRT_REG_DWORD(&reg->host_int, 0);
+	}
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (!ha->flags.msi_enabled)
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
+	return IRQ_HANDLED;
+}
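+
+/*
+ * Legacy (INTx) flow above: when MSI is not enabled, the shared interrupt
+ * vector/state registers are checked to confirm this port actually raised
+ * the interrupt, the target status is cleared, and ISR_INT_VECTOR is read
+ * twice to flush the write before mailbox, async-event and response-queue
+ * work is serviced; the port is unmasked again (0xfbff) on the way out.
+ */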
+
+irqreturn_t
+qla82xx_msix_default(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int status = 0;
+	unsigned long flags;
+	uint32_t stat = 0;
+	uint32_t host_int = 0;
+	uint16_t mb[4];
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+			"%s(): NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+
+	reg = &ha->iobase->isp82;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	do {
+		host_int = RD_REG_DWORD(&reg->host_int);
+		if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+			break;
+		if (host_int) {
+			stat = RD_REG_DWORD(&reg->host_status);
+
+			switch (stat & 0xff) {
+			case 0x1:
+			case 0x2:
+			case 0x10:
+			case 0x11:
+				qla82xx_mbx_completion(vha, MSW(stat));
+				status |= MBX_INTERRUPT;
+				break;
+			case 0x12:
+				mb[0] = MSW(stat);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			case 0x13:
+				qla24xx_process_response_queue(vha, rsp);
+				break;
+			default:
+				ql_dbg(ql_dbg_async, vha, 0x5041,
+				    "Unrecognized interrupt type (%d).\n",
+				    stat & 0xff);
+				break;
+			}
+		}
+		WRT_REG_DWORD(&reg->host_int, 0);
+	} while (0);
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t
+qla82xx_msix_rsp_q(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	unsigned long flags;
+	uint32_t host_int = 0;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+			"%s(): NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->isp82;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+	host_int = RD_REG_DWORD(&reg->host_int);
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+		goto out;
+	qla24xx_process_response_queue(vha, rsp);
+	WRT_REG_DWORD(&reg->host_int, 0);
+out:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return IRQ_HANDLED;
+}
+
+void
+qla82xx_poll(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int status = 0;
+	uint32_t stat;
+	uint32_t host_int = 0;
+	uint16_t mb[4];
+	unsigned long flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		printk(KERN_INFO
+			"%s(): NULL response queue pointer.\n", __func__);
+		return;
+	}
+	ha = rsp->hw;
+
+	reg = &ha->iobase->isp82;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
+
+	host_int = RD_REG_DWORD(&reg->host_int);
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
+		goto out;
+	if (host_int) {
+		stat = RD_REG_DWORD(&reg->host_status);
+		switch (stat & 0xff) {
+		case 0x1:
+		case 0x2:
+		case 0x10:
+		case 0x11:
+			qla82xx_mbx_completion(vha, MSW(stat));
+			status |= MBX_INTERRUPT;
+			break;
+		case 0x12:
+			mb[0] = MSW(stat);
+			mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+			mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+			mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+			qla2x00_async_event(vha, rsp, mb);
+			break;
+		case 0x13:
+			qla24xx_process_response_queue(vha, rsp);
+			break;
+		default:
+			ql_dbg(ql_dbg_p3p, vha, 0xb013,
+			    "Unrecognized interrupt type (%d).\n",
+			    stat & 0xff);
+			break;
+		}
+		WRT_REG_DWORD(&reg->host_int, 0);
+	}
+out:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qla82xx_enable_intrs(struct qla_hw_data *ha)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	qla82xx_mbx_intr_enable(vha);
+	spin_lock_irq(&ha->hardware_lock);
+	if (IS_QLA8044(ha))
+		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+	else
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+	spin_unlock_irq(&ha->hardware_lock);
+	ha->interrupts_on = 1;
+}
+
+void
+qla82xx_disable_intrs(struct qla_hw_data *ha)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	qla82xx_mbx_intr_disable(vha);
+	spin_lock_irq(&ha->hardware_lock);
+	if (IS_QLA8044(ha))
+		qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+	else
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+	spin_unlock_irq(&ha->hardware_lock);
+	ha->interrupts_on = 0;
+}
+
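+/*
+ * qla82xx_init_flags() latches the per-port legacy interrupt register set
+ * out of the legacy_intr[] table (indexed by PCI function number) so the
+ * interrupt paths can test, mask and unmask exactly this port's bit.
+ */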
+void qla82xx_init_flags(struct qla_hw_data *ha)
+{
+	struct qla82xx_legacy_intr_set *nx_legacy_intr;
+
+	/* ISP 8021 initializations */
+	rwlock_init(&ha->hw_lock);
+	ha->qdr_sn_window = -1;
+	ha->ddr_mn_window = -1;
+	ha->curr_window = 255;
+	ha->portnum = PCI_FUNC(ha->pdev->devfn);
+	nx_legacy_intr = &legacy_intr[ha->portnum];
+	ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
+	ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
+	ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
+	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
+}
+
+static inline void
+qla82xx_set_idc_version(scsi_qla_host_t *vha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
+		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+		    QLA82XX_IDC_VERSION);
+		ql_log(ql_log_info, vha, 0xb082,
+		    "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
+	} else {
+		idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
+		if (idc_ver != QLA82XX_IDC_VERSION)
+			ql_log(ql_log_info, vha, 0xb083,
+			    "qla2xxx driver IDC version %d is not compatible "
+			    "with IDC version %d of the other drivers\n",
+			    QLA82XX_IDC_VERSION, idc_ver);
+	}
+}
+
+inline void
+qla82xx_set_drv_active(scsi_qla_host_t *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+
+	/* If reset value is all FF's, initialize DRV_ACTIVE */
+	if (drv_active == 0xffffffff) {
+		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
+			QLA82XX_DRV_NOT_ACTIVE);
+		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	}
+	drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
+
+inline void
+qla82xx_clear_drv_active(struct qla_hw_data *ha)
+{
+	uint32_t drv_active;
+
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
+}
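+
+/*
+ * The DRV_ACTIVE and DRV_STATE CRB registers are shared by every PCI
+ * function: each function owns a 4-bit field at (portnum * 4), so for
+ * example a function at portnum 2 manipulates bits 11:8.  One 32-bit
+ * register therefore tracks up to eight functions.
+ */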
+
+static inline int
+qla82xx_need_reset(struct qla_hw_data *ha)
+{
+	uint32_t drv_state;
+	int rval;
+
+	if (ha->flags.nic_core_reset_owner)
+		return 1;
+	else {
+		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+		rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+		return rval;
+	}
+}
+
+static inline void
+qla82xx_set_rst_ready(struct qla_hw_data *ha)
+{
+	uint32_t drv_state;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+
+	/* If reset value is all FF's, initialize DRV_STATE */
+	if (drv_state == 0xffffffff) {
+		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
+		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	}
+	drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+	ql_dbg(ql_dbg_init, vha, 0x00bb,
+	    "drv_state = 0x%08x.\n", drv_state);
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_clear_rst_ready(struct qla_hw_data *ha)
+{
+	uint32_t drv_state;
+
+	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
+}
+
+static inline void
+qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
+{
+	uint32_t qsnt_state;
+
+	qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+void
+qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t qsnt_state;
+
+	qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
+static int
+qla82xx_load_fw(scsi_qla_host_t *vha)
+{
+	int rst;
+	struct fw_blob *blob;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0x009f,
+		    "Error during CRB initialization.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+	udelay(500);
+
+	/* Bring QM and CAMRAM out of reset */
+	rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
+	rst &= ~((1 << 28) | (1 << 24));
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
+
+	/*
+	 * FW Load priority:
+	 * 1) Operational firmware residing in flash.
+	 * 2) Firmware via request-firmware interface (.bin file).
+	 */
+	if (ql2xfwloadbin == 2)
+		goto try_blob_fw;
+
+	ql_log(ql_log_info, vha, 0x00a0,
+	    "Attempting to load firmware from flash.\n");
+
+	if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0x00a1,
+		    "Firmware loaded successfully from flash.\n");
+		return QLA_SUCCESS;
+	} else {
+		ql_log(ql_log_warn, vha, 0x0108,
+		    "Firmware load from flash failed.\n");
+	}
+
+try_blob_fw:
+	ql_log(ql_log_info, vha, 0x00a2,
+	    "Attempting to load firmware from blob.\n");
+
+	/* Load firmware blob. */
+	blob = ha->hablob = qla2x00_request_firmware(vha);
+	if (!blob) {
+		ql_log(ql_log_fatal, vha, 0x00a3,
+		    "Firmware image not present.\n");
+		goto fw_load_failed;
+	}
+
+	/* Validating firmware blob */
+	if (qla82xx_validate_firmware_blob(vha,
+		QLA82XX_FLASH_ROMIMAGE)) {
+		/* Fallback to URI format */
+		if (qla82xx_validate_firmware_blob(vha,
+			QLA82XX_UNIFIED_ROMIMAGE)) {
+			ql_log(ql_log_fatal, vha, 0x00a4,
+			    "No valid firmware image found.\n");
+			return QLA_FUNCTION_FAILED;
+		}
+	}
+
+	if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0x00a5,
+		    "Firmware loaded successfully from binary blob.\n");
+		return QLA_SUCCESS;
+	}
+
+	ql_log(ql_log_fatal, vha, 0x00a6,
+	       "Firmware load failed for binary blob.\n");
+	blob->fw = NULL;
+	blob = NULL;
+
+fw_load_failed:
+	return QLA_FUNCTION_FAILED;
+}
+
+int
+qla82xx_start_firmware(scsi_qla_host_t *vha)
+{
+	uint16_t      lnk;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* scrub dma mask expansion register */
+	qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
+
+	/*
+	 * Put both the PEG CMD and RCV PEG to the default state
+	 * of 0 before resetting the hardware.
+	 */
+	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
+	qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
+
+	/* Overwrite stale initialization register values */
+	qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
+	qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
+
+	if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0x00a7,
+		    "Error trying to start fw.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Handshake with the card before we register the devices. */
+	if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0x00aa,
+		    "Error during card handshake.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Negotiated Link width */
+	pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
+	ha->link_width = (lnk >> 4) & 0x3f;
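+	/*
+	 * Example: a raw LNKSTA value of 0x0081 yields
+	 * (0x0081 >> 4) & 0x3f = 8, i.e. a x8 link.
+	 */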
+
+	/* Synchronize with Receive peg */
+	return qla82xx_check_rcvpeg_state(ha);
+}
+
+static uint32_t *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+	uint32_t length)
+{
+	uint32_t i;
+	uint32_t val;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Dword reads to flash. */
+	for (i = 0; i < length/4; i++, faddr += 4) {
+		if (qla82xx_rom_fast_read(ha, faddr, &val)) {
+			ql_log(ql_log_warn, vha, 0x0106,
+			    "Do ROM fast read failed.\n");
+			goto done_read;
+		}
+		dwptr[i] = cpu_to_le32(val);
+	}
+done_read:
+	return dwptr;
+}
+
+static int
+qla82xx_unprotect_flash(struct qla_hw_data *ha)
+{
+	int ret;
+	uint32_t val;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	ret = ql82xx_rom_lock_d(ha);
+	if (ret < 0) {
+		ql_log(ql_log_warn, vha, 0xb014,
+		    "ROM Lock failed.\n");
+		return ret;
+	}
+
+	ret = qla82xx_read_status_reg(ha, &val);
+	if (ret < 0)
+		goto done_unprotect;
+
+	val &= ~(BLOCK_PROTECT_BITS << 2);
+	ret = qla82xx_write_status_reg(ha, val);
+	if (ret < 0) {
+		val |= (BLOCK_PROTECT_BITS << 2);
+		qla82xx_write_status_reg(ha, val);
+	}
+
+	if (qla82xx_write_disable_flash(ha) != 0)
+		ql_log(ql_log_warn, vha, 0xb015,
+		    "Write disable failed.\n");
+
+done_unprotect:
+	qla82xx_rom_unlock(ha);
+	return ret;
+}
+
+static int
+qla82xx_protect_flash(struct qla_hw_data *ha)
+{
+	int ret;
+	uint32_t val;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	ret = ql82xx_rom_lock_d(ha);
+	if (ret < 0) {
+		ql_log(ql_log_warn, vha, 0xb016,
+		    "ROM Lock failed.\n");
+		return ret;
+	}
+
+	ret = qla82xx_read_status_reg(ha, &val);
+	if (ret < 0)
+		goto done_protect;
+
+	val |= (BLOCK_PROTECT_BITS << 2);
+	/* LOCK all sectors */
+	ret = qla82xx_write_status_reg(ha, val);
+	if (ret < 0)
+		ql_log(ql_log_warn, vha, 0xb017,
+		    "Write status register failed.\n");
+
+	if (qla82xx_write_disable_flash(ha) != 0)
+		ql_log(ql_log_warn, vha, 0xb018,
+		    "Write disable failed.\n");
+done_protect:
+	qla82xx_rom_unlock(ha);
+	return ret;
+}
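+
+/*
+ * The two helpers above toggle the serial flash block-protect bits in
+ * the status register (shifted by 2, i.e. bits 4:2 assuming the usual
+ * 3-bit BLOCK_PROTECT_BITS mask): cleared before an update so every
+ * sector is writable, then set again to re-arm write protection.
+ */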
+
+static int
+qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
+{
+	int ret = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	ret = ql82xx_rom_lock_d(ha);
+	if (ret < 0) {
+		ql_log(ql_log_warn, vha, 0xb019,
+		    "ROM Lock failed.\n");
+		return ret;
+	}
+
+	qla82xx_flash_set_write_enable(ha);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
+	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
+
+	if (qla82xx_wait_rom_done(ha)) {
+		ql_log(ql_log_warn, vha, 0xb01a,
+		    "Error waiting for rom done.\n");
+		ret = -1;
+		goto done;
+	}
+	ret = qla82xx_flash_wait_write_finish(ha);
+done:
+	qla82xx_rom_unlock(ha);
+	return ret;
+}
+
+/*
+ * Address and length are given in bytes.
+ */
+uint8_t *
+qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+	uint32_t offset, uint32_t length)
+{
+	scsi_block_requests(vha->host);
+	qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+	scsi_unblock_requests(vha->host);
+	return buf;
+}
+
+static int
+qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+	uint32_t faddr, uint32_t dwords)
+{
+	int ret;
+	uint32_t liter;
+	uint32_t rest_addr;
+	dma_addr_t optrom_dma;
+	void *optrom = NULL;
+	int page_mode = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret = -1;
+
+	/* Prepare burst-capable write on supported ISPs. */
+	if (page_mode && !(faddr & 0xfff) &&
+	    dwords > OPTROM_BURST_DWORDS) {
+		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+		    &optrom_dma, GFP_KERNEL);
+		if (!optrom) {
+			ql_log(ql_log_warn, vha, 0xb01b,
+			    "Unable to allocate memory "
+			    "for optrom burst write (%x KB).\n",
+			    OPTROM_BURST_SIZE / 1024);
+		}
+	}
+
+	rest_addr = ha->fdt_block_size - 1;
+
+	ret = qla82xx_unprotect_flash(ha);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xb01c,
+		    "Unable to unprotect flash for update.\n");
+		goto write_done;
+	}
+
+	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+		/* Are we at the beginning of a sector? */
+		if ((faddr & rest_addr) == 0) {
+
+			ret = qla82xx_erase_sector(ha, faddr);
+			if (ret) {
+				ql_log(ql_log_warn, vha, 0xb01d,
+				    "Unable to erase sector: address=%x.\n",
+				    faddr);
+				break;
+			}
+		}
+
+		/* Go with burst-write. */
+		if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+			/* Copy data to DMA'ble buffer. */
+			memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+			ret = qla2x00_load_ram(vha, optrom_dma,
+			    (ha->flash_data_off | faddr),
+			    OPTROM_BURST_DWORDS);
+			if (ret != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0xb01e,
+				    "Unable to burst-write optrom segment "
+				    "(%x/%x/%llx).\n", ret,
+				    (ha->flash_data_off | faddr),
+				    (unsigned long long)optrom_dma);
+				ql_log(ql_log_warn, vha, 0xb01f,
+				    "Reverting to slow-write.\n");
+
+				dma_free_coherent(&ha->pdev->dev,
+				    OPTROM_BURST_SIZE, optrom, optrom_dma);
+				optrom = NULL;
+			} else {
+				liter += OPTROM_BURST_DWORDS - 1;
+				faddr += OPTROM_BURST_DWORDS - 1;
+				dwptr += OPTROM_BURST_DWORDS - 1;
+				continue;
+			}
+		}
+
+		ret = qla82xx_write_flash_dword(ha, faddr,
+		    cpu_to_le32(*dwptr));
+		if (ret) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb020,
+			    "Unable to program flash address=%x data=%x.\n",
+			    faddr, *dwptr);
+			break;
+		}
+	}
+
+	ret = qla82xx_protect_flash(ha);
+	if (ret)
+		ql_log(ql_log_warn, vha, 0xb021,
+		    "Unable to protect flash after update.\n");
+write_done:
+	if (optrom)
+		dma_free_coherent(&ha->pdev->dev,
+		    OPTROM_BURST_SIZE, optrom, optrom_dma);
+	return ret;
+}
+
+int
+qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+	uint32_t offset, uint32_t length)
+{
+	int rval;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
+		length >> 2);
+	scsi_unblock_requests(vha->host);
+
+	/* Convert the ISP82xx return code to a generic one */
+	if (rval)
+		rval = QLA_FUNCTION_FAILED;
+	else
+		rval = QLA_SUCCESS;
+	return rval;
+}
+
+void
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	uint32_t dbval;
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	dbval = 0x04 | (ha->portnum << 5);
+
+	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+	if (ql2xdbwr)
+		qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
+	else {
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+		wmb();
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+			wmb();
+		}
+	}
+}
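+
+/*
+ * Doorbell layout used above: bits 4:0 carry the command (0x04), bits
+ * 7:5 the PCI function, bits 15:8 the request-queue id and bits 31:16
+ * the new ring index.  When ql2xdbwr is clear, the write is re-posted
+ * until the hardware echoes it back through nxdb_rd_ptr, guarding
+ * against dropped doorbell writes.
+ */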
+
+static void
+qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	uint32_t lock_owner = 0;
+
+	if (qla82xx_rom_lock(ha)) {
+		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+		/* Someone else is holding the lock. */
+		ql_log(ql_log_info, vha, 0xb022,
+		    "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
+	}
+	/*
+	 * Either we got the lock, or someone
+	 * else died while holding it.
+	 * In either case, unlock.
+	 */
+	qla82xx_rom_unlock(ha);
+}
+
+/*
+ * qla82xx_device_bootstrap
+ *    Initialize device, set DEV_READY, start fw
+ *
+ * Note:
+ *      IDC lock must be held upon entry
+ *
+ * Return:
+ *    Success : 0
+ *    Failed  : 1
+ */
+static int
+qla82xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	int i;
+	uint32_t old_count, count;
+	struct qla_hw_data *ha = vha->hw;
+	int need_reset = 0;
+
+	need_reset = qla82xx_need_reset(ha);
+
+	if (need_reset) {
+		/* We are trying to perform a recovery here. */
+		if (ha->flags.isp82xx_fw_hung)
+			qla82xx_rom_lock_recovery(ha);
+	} else  {
+		old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+		for (i = 0; i < 10; i++) {
+			msleep(200);
+			count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+			if (count != old_count) {
+				rval = QLA_SUCCESS;
+				goto dev_ready;
+			}
+		}
+		qla82xx_rom_lock_recovery(ha);
+	}
+
+	/* set to DEV_INITIALIZING */
+	ql_log(ql_log_info, vha, 0x009e,
+	    "HW State: INITIALIZING.\n");
+	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
+
+	qla82xx_idc_unlock(ha);
+	rval = qla82xx_start_firmware(vha);
+	qla82xx_idc_lock(ha);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0x00ad,
+		    "HW State: FAILED.\n");
+		qla82xx_clear_drv_active(ha);
+		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
+		return rval;
+	}
+
+dev_ready:
+	ql_log(ql_log_info, vha, 0x00ae,
+	    "HW State: READY.\n");
+	qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
+
+	return QLA_SUCCESS;
+}
+
+/*
+ * qla82xx_need_qsnt_handler
+ *    Code to start the quiescence sequence
+ *
+ * Note:
+ *      IDC lock must be held upon entry
+ *
+ * Return: void
+ */
+
+static void
+qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t dev_state, drv_state, drv_active;
+	unsigned long reset_timeout;
+
+	if (vha->flags.online) {
+		/* Block any further I/O and wait for pending commands to complete */
+		qla2x00_quiesce_io(vha);
+	}
+
+	/* Set the quiescence ready bit */
+	qla82xx_set_qsnt_ready(ha);
+
+	/* Wait up to 30 seconds for the other functions to ack */
+	reset_timeout = jiffies + (30 * HZ);
+
+	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	/*
+	 * A value of 2 is written when quiescence is acked, so shift
+	 * drv_active left one bit before comparing with drv_state.
+	 */
+	drv_active = drv_active << 0x01;
+
+	while (drv_state != drv_active) {
+
+		if (time_after_eq(jiffies, reset_timeout)) {
+			/*
+			 * Quiescence timeout: the other functions did not
+			 * ack, so change the state back to DEV_READY.
+			 */
+			ql_log(ql_log_info, vha, 0xb023,
+			    "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+			    "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
+			    drv_active, drv_state);
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+			    QLA8XXX_DEV_READY);
+			ql_log(ql_log_info, vha, 0xb025,
+			    "HW State: DEV_READY.\n");
+			qla82xx_idc_unlock(ha);
+			qla2x00_perform_loop_resync(vha);
+			qla82xx_idc_lock(ha);
+
+			qla82xx_clear_qsnt_ready(vha);
+			return;
+		}
+
+		qla82xx_idc_unlock(ha);
+		msleep(1000);
+		qla82xx_idc_lock(ha);
+
+		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+		drv_active = drv_active << 0x01;
+	}
+	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	/* Everyone acked, so set the state to DEV_QUIESCENT */
+	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+		ql_log(ql_log_info, vha, 0xb026,
+		    "HW State: DEV_QUIESCENT.\n");
+		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
+	}
+}
+
+/*
+ * qla82xx_wait_for_state_change
+ *    Wait for the device state to change from the given current state
+ *
+ * Note:
+ *     IDC lock must not be held upon entry
+ *
+ * Return:
+ *    Changed device state.
+ */
+uint32_t
+qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t dev_state;
+
+	do {
+		msleep(1000);
+		qla82xx_idc_lock(ha);
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		qla82xx_idc_unlock(ha);
+	} while (dev_state == curr_state);
+
+	return dev_state;
+}
+
+void
+qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Disable the board */
+	ql_log(ql_log_fatal, vha, 0x00b8,
+	    "Disabling the board.\n");
+
+	if (IS_QLA82XX(ha)) {
+		qla82xx_clear_drv_active(ha);
+		qla82xx_idc_unlock(ha);
+	} else if (IS_QLA8044(ha)) {
+		qla8044_clear_drv_active(ha);
+		qla8044_idc_unlock(ha);
+	}
+
+	/* Set DEV_FAILED flag to disable timer */
+	vha->device_flags |= DFLG_DEV_FAILED;
+	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+	qla2x00_mark_all_devices_lost(vha, 0);
+	vha->flags.online = 0;
+	vha->flags.init_done = 0;
+}
+
+/*
+ * qla82xx_need_reset_handler
+ *    Code to start the reset sequence
+ *
+ * Note:
+ *      IDC lock must be held upon entry
+ *
+ * Return: void
+ */
+static void
+qla82xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+	uint32_t dev_state, drv_state, drv_active;
+	uint32_t active_mask = 0;
+	unsigned long reset_timeout;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	if (vha->flags.online) {
+		qla82xx_idc_unlock(ha);
+		qla2x00_abort_isp_cleanup(vha);
+		ha->isp_ops->get_flash_version(vha, req->ring);
+		ha->isp_ops->nvram_config(vha);
+		qla82xx_idc_lock(ha);
+	}
+
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	if (!ha->flags.nic_core_reset_owner) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb028,
+		    "reset_acknowledged by 0x%x\n", ha->portnum);
+		qla82xx_set_rst_ready(ha);
+	} else {
+		active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
+		drv_active &= active_mask;
+		ql_dbg(ql_dbg_p3p, vha, 0xb029,
+		    "active_mask: 0x%08x\n", active_mask);
+	}
+
+	/* Wait up to fcoe_reset_timeout seconds for reset ack from all functions */
+	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb02a,
+	    "drv_state: 0x%08x, drv_active: 0x%08x, "
+	    "dev_state: 0x%08x, active_mask: 0x%08x\n",
+	    drv_state, drv_active, dev_state, active_mask);
+
+	while (drv_state != drv_active &&
+	    dev_state != QLA8XXX_DEV_INITIALIZING) {
+		if (time_after_eq(jiffies, reset_timeout)) {
+			ql_log(ql_log_warn, vha, 0x00b5,
+			    "Reset timeout.\n");
+			break;
+		}
+		qla82xx_idc_unlock(ha);
+		msleep(1000);
+		qla82xx_idc_lock(ha);
+		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+		if (ha->flags.nic_core_reset_owner)
+			drv_active &= active_mask;
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb02b,
+	    "drv_state: 0x%08x, drv_active: 0x%08x, "
+	    "dev_state: 0x%08x, active_mask: 0x%08x\n",
+	    drv_state, drv_active, dev_state, active_mask);
+
+	ql_log(ql_log_info, vha, 0x00b6,
+	    "Device state is 0x%x = %s.\n",
+	    dev_state,
+	    dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+
+	/* Force to DEV_COLD unless someone else is starting a reset */
+	if (dev_state != QLA8XXX_DEV_INITIALIZING &&
+	    dev_state != QLA8XXX_DEV_COLD) {
+		ql_log(ql_log_info, vha, 0x00b7,
+		    "HW State: COLD/RE-INIT.\n");
+		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
+		qla82xx_set_rst_ready(ha);
+		if (ql2xmdenable) {
+			if (qla82xx_md_collect(vha))
+				ql_log(ql_log_warn, vha, 0xb02c,
+				    "Minidump not collected.\n");
+		} else
+			ql_log(ql_log_warn, vha, 0xb04f,
+			    "Minidump disabled.\n");
+	}
+}
+
+int
+qla82xx_check_md_needed(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
+	int rval = QLA_SUCCESS;
+
+	fw_major_version = ha->fw_major_version;
+	fw_minor_version = ha->fw_minor_version;
+	fw_subminor_version = ha->fw_subminor_version;
+
+	rval = qla2x00_get_fw_version(vha);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	if (ql2xmdenable) {
+		if (!ha->fw_dumped) {
+			if ((fw_major_version != ha->fw_major_version ||
+			    fw_minor_version != ha->fw_minor_version ||
+			    fw_subminor_version != ha->fw_subminor_version) ||
+			    (ha->prev_minidump_failed)) {
+				ql_dbg(ql_dbg_p3p, vha, 0xb02d,
+				    "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
+				    fw_major_version, fw_minor_version,
+				    fw_subminor_version,
+				    ha->fw_major_version,
+				    ha->fw_minor_version,
+				    ha->fw_subminor_version,
+				    ha->prev_minidump_failed);
+				/* Release MiniDump resources */
+				qla82xx_md_free(vha);
+				/* Allocate MiniDump resources */
+				qla82xx_md_prep(vha);
+			}
+		} else
+			ql_log(ql_log_info, vha, 0xb02e,
+			    "Firmware dump available to retrieve\n");
+	}
+	return rval;
+}
+
+
+static int
+qla82xx_check_fw_alive(scsi_qla_host_t *vha)
+{
+	uint32_t fw_heartbeat_counter;
+	int status = 0;
+
+	fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
+		QLA82XX_PEG_ALIVE_COUNTER);
+	/* all 0xff, assume AER/EEH in progress, ignore */
+	if (fw_heartbeat_counter == 0xffffffff) {
+		ql_dbg(ql_dbg_timer, vha, 0x6003,
+		    "FW heartbeat counter is 0xffffffff, "
+		    "returning status=%d.\n", status);
+		return status;
+	}
+	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+		vha->seconds_since_last_heartbeat++;
+		/* FW not alive after 2 seconds */
+		if (vha->seconds_since_last_heartbeat == 2) {
+			vha->seconds_since_last_heartbeat = 0;
+			status = 1;
+		}
+	} else
+		vha->seconds_since_last_heartbeat = 0;
+	vha->fw_heartbeat_counter = fw_heartbeat_counter;
+	if (status)
+		ql_dbg(ql_dbg_timer, vha, 0x6004,
+		    "Returning status=%d.\n", status);
+	return status;
+}
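+
+/*
+ * The firmware bumps PEG_ALIVE_COUNTER while it is healthy; a read of
+ * 0xffffffff is taken as AER/EEH in progress and ignored, while two
+ * consecutive samples without movement mark the firmware as hung.
+ */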
+
+/*
+ * qla82xx_device_state_handler
+ *	Main state handler
+ *
+ * Note:
+ *      IDC lock must be held upon entry
+ *
+ * Return:
+ *    Success : 0
+ *    Failed  : 1
+ */
+int
+qla82xx_device_state_handler(scsi_qla_host_t *vha)
+{
+	uint32_t dev_state;
+	uint32_t old_dev_state;
+	int rval = QLA_SUCCESS;
+	unsigned long dev_init_timeout;
+	struct qla_hw_data *ha = vha->hw;
+	int loopcount = 0;
+
+	qla82xx_idc_lock(ha);
+	if (!vha->flags.init_done) {
+		qla82xx_set_drv_active(vha);
+		qla82xx_set_idc_version(vha);
+	}
+
+	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	old_dev_state = dev_state;
+	ql_log(ql_log_info, vha, 0x009b,
+	    "Device state is 0x%x = %s.\n",
+	    dev_state,
+	    dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+
+	/* Wait up to fcoe_dev_init_timeout seconds for the device to go ready */
+	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+	while (1) {
+
+		if (time_after_eq(jiffies, dev_init_timeout)) {
+			ql_log(ql_log_fatal, vha, 0x009c,
+			    "Device init failed.\n");
+			rval = QLA_FUNCTION_FAILED;
+			break;
+		}
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		if (old_dev_state != dev_state) {
+			loopcount = 0;
+			old_dev_state = dev_state;
+		}
+		if (loopcount < 5) {
+			ql_log(ql_log_info, vha, 0x009d,
+			    "Device state is 0x%x = %s.\n",
+			    dev_state,
+			    dev_state < MAX_STATES ? qdev_state(dev_state) :
+			    "Unknown");
+		}
+
+		switch (dev_state) {
+		case QLA8XXX_DEV_READY:
+			ha->flags.nic_core_reset_owner = 0;
+			goto rel_lock;
+		case QLA8XXX_DEV_COLD:
+			rval = qla82xx_device_bootstrap(vha);
+			break;
+		case QLA8XXX_DEV_INITIALIZING:
+			qla82xx_idc_unlock(ha);
+			msleep(1000);
+			qla82xx_idc_lock(ha);
+			break;
+		case QLA8XXX_DEV_NEED_RESET:
+			if (!ql2xdontresethba)
+				qla82xx_need_reset_handler(vha);
+			else {
+				qla82xx_idc_unlock(ha);
+				msleep(1000);
+				qla82xx_idc_lock(ha);
+			}
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_dev_init_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_NEED_QUIESCENT:
+			qla82xx_need_qsnt_handler(vha);
+			/* Reset timeout value after quiescence handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_dev_init_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_QUIESCENT:
+			/*
+			 * The quiescence owner exits; the other functions
+			 * wait for the state to change.
+			 */
+			if (ha->flags.quiesce_owner)
+				goto rel_lock;
+
+			qla82xx_idc_unlock(ha);
+			msleep(1000);
+			qla82xx_idc_lock(ha);
+
+			/* Reset timeout value after quiescence handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_dev_init_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_FAILED:
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			goto exit;
+		default:
+			qla82xx_idc_unlock(ha);
+			msleep(1000);
+			qla82xx_idc_lock(ha);
+		}
+		loopcount++;
+	}
+rel_lock:
+	qla82xx_idc_unlock(ha);
+exit:
+	return rval;
+}
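+
+/*
+ * IDC state machine handled above, in brief:
+ *
+ *   COLD            -> bootstrap the device and load firmware
+ *   INITIALIZING    -> another function is bootstrapping; poll
+ *   NEED_RESET      -> run the reset handshake (unless ql2xdontresethba)
+ *   NEED_QUIESCENT  -> quiesce I/O and ack
+ *   QUIESCENT       -> owner exits, others wait for a state change
+ *   READY           -> done
+ *   FAILED          -> offline the board
+ */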
+
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+	uint32_t temp, temp_state, temp_val;
+	struct qla_hw_data *ha = vha->hw;
+
+	temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql_log(ql_log_warn, vha, 0x600e,
+		    "Device temperature %d degrees C exceeds "
+		    " maximum allowed. Hardware has been shut down.\n",
+		    temp_val);
+		return 1;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		ql_log(ql_log_warn, vha, 0x600f,
+		    "Device temperature %d degrees C exceeds "
+		    "operating range. Immediate action needed.\n",
+		    temp_val);
+	}
+	return 0;
+}
+
+int qla82xx_read_temperature(scsi_qla_host_t *vha)
+{
+	uint32_t temp;
+
+	temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
+	return qla82xx_get_temp_val(temp);
+}
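+
+/*
+ * CRB_TEMP_STATE packs both the temperature value and a state code
+ * (normal/warn/panic) into one register; qla82xx_get_temp_state() and
+ * qla82xx_get_temp_val() unpack the two fields.
+ */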
+
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ha->flags.mbox_busy) {
+		ha->flags.mbox_int = 1;
+		ha->flags.mbox_busy = 0;
+		ql_log(ql_log_warn, vha, 0x6010,
+		    "Doing premature completion of mbx command.\n");
+		if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+			complete(&ha->mbx_intr_comp);
+	}
+}
+
+void qla82xx_watchdog(scsi_qla_host_t *vha)
+{
+	uint32_t dev_state, halt_status;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* don't poll if reset is going on */
+	if (!ha->flags.nic_core_reset_hdlr_active) {
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		if (qla82xx_check_temp(vha)) {
+			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+			ha->flags.isp82xx_fw_hung = 1;
+			qla82xx_clear_pending_mbx(vha);
+		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+		    !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+			ql_log(ql_log_warn, vha, 0x6001,
+			    "Adapter reset needed.\n");
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+			!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+			ql_log(ql_log_warn, vha, 0x6002,
+			    "Quiescent needed.\n");
+			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+		} else if (dev_state == QLA8XXX_DEV_FAILED &&
+			!test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
+			vha->flags.online == 1) {
+			ql_log(ql_log_warn, vha, 0xb055,
+			    "Adapter state is failed. Offlining.\n");
+			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+			ha->flags.isp82xx_fw_hung = 1;
+			qla82xx_clear_pending_mbx(vha);
+		} else {
+			if (qla82xx_check_fw_alive(vha)) {
+				ql_dbg(ql_dbg_timer, vha, 0x6011,
+				    "disabling pause transmit on port 0 & 1.\n");
+				qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+				    CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
+				halt_status = qla82xx_rd_32(ha,
+				    QLA82XX_PEG_HALT_STATUS1);
+				ql_log(ql_log_info, vha, 0x6005,
+				    "dumping hw/fw registers:.\n "
+				    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
+				    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
+				    " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
+				    " PEG_NET_4_PC: 0x%x.\n", halt_status,
+				    qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
+				    qla82xx_rd_32(ha,
+					    QLA82XX_CRB_PEG_NET_0 + 0x3c),
+				    qla82xx_rd_32(ha,
+					    QLA82XX_CRB_PEG_NET_1 + 0x3c),
+				    qla82xx_rd_32(ha,
+					    QLA82XX_CRB_PEG_NET_2 + 0x3c),
+				    qla82xx_rd_32(ha,
+					    QLA82XX_CRB_PEG_NET_3 + 0x3c),
+				    qla82xx_rd_32(ha,
+					    QLA82XX_CRB_PEG_NET_4 + 0x3c));
+				if (((halt_status & 0x1fffff00) >> 8) == 0x67)
+					ql_log(ql_log_warn, vha, 0xb052,
+					    "Firmware aborted with "
+					    "error code 0x00006700. Device is "
+					    "being reset.\n");
+				if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+					set_bit(ISP_UNRECOVERABLE,
+					    &vha->dpc_flags);
+				} else {
+					ql_log(ql_log_info, vha, 0x6006,
+					    "Detect abort  needed.\n");
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+				}
+				ha->flags.isp82xx_fw_hung = 1;
+				ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+				qla82xx_clear_pending_mbx(vha);
+			}
+		}
+	}
+}
+
+int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+{
+	int rval = -1;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA82XX(ha))
+		rval = qla82xx_device_state_handler(vha);
+	else if (IS_QLA8044(ha)) {
+		qla8044_idc_lock(ha);
+		/* Decide the reset ownership */
+		qla83xx_reset_ownership(vha);
+		qla8044_idc_unlock(ha);
+		rval = qla8044_device_state_handler(vha);
+	}
+	return rval;
+}
+
+void
+qla82xx_set_reset_owner(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t dev_state = 0;
+
+	if (IS_QLA82XX(ha))
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	else if (IS_QLA8044(ha))
+		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+	if (dev_state == QLA8XXX_DEV_READY) {
+		ql_log(ql_log_info, vha, 0xb02f,
+		    "HW State: NEED RESET\n");
+		if (IS_QLA82XX(ha)) {
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+			    QLA8XXX_DEV_NEED_RESET);
+			ha->flags.nic_core_reset_owner = 1;
+			ql_dbg(ql_dbg_p3p, vha, 0xb030,
+			    "reset_owner is 0x%x\n", ha->portnum);
+		} else if (IS_QLA8044(ha))
+			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+			    QLA8XXX_DEV_NEED_RESET);
+	} else
+		ql_log(ql_log_info, vha, 0xb031,
+		    "Device state is 0x%x = %s.\n",
+		    dev_state,
+		    dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+}
+
+/*
+ *  qla82xx_abort_isp
+ *      Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ *      vha          = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qla82xx_abort_isp(scsi_qla_host_t *vha)
+{
+	int rval = -1;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_log(ql_log_warn, vha, 0x8024,
+		    "Device in failed state, exiting.\n");
+		return QLA_SUCCESS;
+	}
+	ha->flags.nic_core_reset_hdlr_active = 1;
+
+	qla82xx_idc_lock(ha);
+	qla82xx_set_reset_owner(vha);
+	qla82xx_idc_unlock(ha);
+
+	if (IS_QLA82XX(ha))
+		rval = qla82xx_device_state_handler(vha);
+	else if (IS_QLA8044(ha)) {
+		qla8044_idc_lock(ha);
+		/* Decide the reset ownership */
+		qla83xx_reset_ownership(vha);
+		qla8044_idc_unlock(ha);
+		rval = qla8044_device_state_handler(vha);
+	}
+
+	qla82xx_idc_lock(ha);
+	qla82xx_clear_rst_ready(ha);
+	qla82xx_idc_unlock(ha);
+
+	if (rval == QLA_SUCCESS) {
+		ha->flags.isp82xx_fw_hung = 0;
+		ha->flags.nic_core_reset_hdlr_active = 0;
+		qla82xx_restart_isp(vha);
+	}
+
+	if (rval) {
+		vha->flags.online = 1;
+		if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+			if (ha->isp_abort_cnt == 0) {
+				ql_log(ql_log_warn, vha, 0x8027,
+				    "ISP error recover failed - board "
+				    "disabled.\n");
+				/*
+				 * The next call disables the board
+				 * completely.
+				 */
+				ha->isp_ops->reset_adapter(vha);
+				vha->flags.online = 0;
+				clear_bit(ISP_ABORT_RETRY,
+				    &vha->dpc_flags);
+				rval = QLA_SUCCESS;
+			} else { /* schedule another ISP abort */
+				ha->isp_abort_cnt--;
+				ql_log(ql_log_warn, vha, 0x8036,
+				    "ISP abort - retry remaining %d.\n",
+				    ha->isp_abort_cnt);
+				rval = QLA_FUNCTION_FAILED;
+			}
+		} else {
+			ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
+			ql_dbg(ql_dbg_taskm, vha, 0x8029,
+			    "ISP error recovery - retrying (%d) more times.\n",
+			    ha->isp_abort_cnt);
+			set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+			rval = QLA_FUNCTION_FAILED;
+		}
+	}
+	return rval;
+}
+
+/*
+ *  qla82xx_fcoe_ctx_reset
+ *      Performs a quick reset and aborts all outstanding commands.
+ *      This only performs an FCoE context reset and avoids a full-blown
+ *      chip reset.
+ *
+ * Input:
+ *      vha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+	int rval = QLA_FUNCTION_FAILED;
+
+	if (vha->flags.online) {
+		/* Abort all outstanding commands, so as to be requeued later */
+		qla2x00_abort_isp_cleanup(vha);
+	}
+
+	/*
+	 * Stop the currently executing firmware.  This destroys the
+	 * existing FCoE context at the firmware end.
+	 */
+	qla2x00_try_to_stop_firmware(vha);
+
+	/* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
+	rval = qla82xx_restart_isp(vha);
+
+	return rval;
+}
+
+/*
+ * qla2x00_wait_for_fcoe_ctx_reset
+ *    Wait till the FCoE context is reset.
+ *
+ * Note:
+ *    Does context switching here.
+ *    Release SPIN_LOCK (if any) before calling this routine.
+ *
+ * Return:
+ *    Success (fcoe_ctx reset is done) : 0
+ *    Failed  (fcoe_ctx reset not completed within max loop timeout) : 1
+ */
+int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
+{
+	int status = QLA_FUNCTION_FAILED;
+	unsigned long wait_reset;
+
+	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+	while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+	    && time_before(jiffies, wait_reset)) {
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ);
+
+		if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
+		    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+			status = QLA_SUCCESS;
+			break;
+		}
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb027,
+	       "%s: status=%d.\n", __func__, status);
+
+	return status;
+}
+
+void
+qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
+{
+	int i, fw_state = 0;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+
+	/*
+	 * Check whether the 82XX firmware is still alive; we may have
+	 * arrived here from NEED_RESET detection only.
+	 */
+	if (!ha->flags.isp82xx_fw_hung) {
+		for (i = 0; i < 2; i++) {
+			msleep(1000);
+			if (IS_QLA82XX(ha))
+				fw_state = qla82xx_check_fw_alive(vha);
+			else if (IS_QLA8044(ha))
+				fw_state = qla8044_check_fw_alive(vha);
+			if (fw_state) {
+				ha->flags.isp82xx_fw_hung = 1;
+				qla82xx_clear_pending_mbx(vha);
+				break;
+			}
+		}
+	}
+	ql_dbg(ql_dbg_init, vha, 0x00b0,
+	    "Entered %s fw_hung=%d.\n",
+	    __func__, ha->flags.isp82xx_fw_hung);
+
+	/* Abort all commands gracefully if fw NOT hung */
+	if (!ha->flags.isp82xx_fw_hung) {
+		int cnt, que;
+		srb_t *sp;
+		struct req_que *req;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		for (que = 0; que < ha->max_req_queues; que++) {
+			req = ha->req_q_map[que];
+			if (!req)
+				continue;
+			for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+				sp = req->outstanding_cmds[cnt];
+				if (sp) {
+					if ((!sp->u.scmd.ctx ||
+					    (sp->flags &
+						SRB_FCP_CMND_DMA_VALID)) &&
+						!ha->flags.isp82xx_fw_hung) {
+						spin_unlock_irqrestore(
+						    &ha->hardware_lock, flags);
+						if (ha->isp_ops->abort_command(sp)) {
+							ql_log(ql_log_info, vha,
+							    0x00b1,
+							    "mbx abort failed.\n");
+						} else {
+							ql_log(ql_log_info, vha,
+							    0x00b2,
+							    "mbx abort success.\n");
+						}
+						spin_lock_irqsave(&ha->hardware_lock, flags);
+					}
+				}
+			}
+		}
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		/* Wait for pending cmds (physical and virtual) to complete */
+		if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+		    WAIT_HOST) == QLA_SUCCESS) {
+			ql_dbg(ql_dbg_init, vha, 0x00b3,
+			    "Done waiting for pending commands.\n");
+		}
+	}
+}
+
+/* Minidump related functions */
+static int
+qla82xx_minidump_process_control(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla82xx_md_entry_crb *crb_entry;
+	uint32_t read_value, opcode, poll_time;
+	uint32_t addr, index, crb_addr;
+	unsigned long wtime;
+	struct qla82xx_md_template_hdr *tmplt_hdr;
+	uint32_t rval = QLA_SUCCESS;
+	int i;
+
+	tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+	crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
+	crb_addr = crb_entry->addr;
+
+	for (i = 0; i < crb_entry->op_count; i++) {
+		opcode = crb_entry->crb_ctrl.opcode;
+		if (opcode & QLA82XX_DBG_OPCODE_WR) {
+			qla82xx_md_rw_32(ha, crb_addr,
+			    crb_entry->value_1, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_WR;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RW) {
+			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+			qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_RW;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_AND) {
+			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+			read_value &= crb_entry->value_2;
+			opcode &= ~QLA82XX_DBG_OPCODE_AND;
+			if (opcode & QLA82XX_DBG_OPCODE_OR) {
+				read_value |= crb_entry->value_3;
+				opcode &= ~QLA82XX_DBG_OPCODE_OR;
+			}
+			qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_OR) {
+			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+			read_value |= crb_entry->value_3;
+			qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_OR;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+			poll_time = crb_entry->crb_strd.poll_timeout;
+			wtime = jiffies + poll_time;
+			read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
+
+			do {
+				if ((read_value & crb_entry->value_2)
+				    == crb_entry->value_1)
+					break;
+				else if (time_after_eq(jiffies, wtime)) {
+					/* capturing dump failed */
+					rval = QLA_FUNCTION_FAILED;
+					break;
+				} else
+					read_value = qla82xx_md_rw_32(ha,
+					    crb_addr, 0, 0);
+			} while (1);
+			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else
+				addr = crb_addr;
+
+			read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+			index = crb_entry->crb_ctrl.state_index_v;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else
+				addr = crb_addr;
+
+			if (crb_entry->crb_ctrl.state_index_v) {
+				index = crb_entry->crb_ctrl.state_index_v;
+				read_value =
+				    tmplt_hdr->saved_state_array[index];
+			} else
+				read_value = crb_entry->value_1;
+
+			qla82xx_md_rw_32(ha, addr, read_value, 1);
+			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+			index = crb_entry->crb_ctrl.state_index_v;
+			read_value = tmplt_hdr->saved_state_array[index];
+			read_value <<= crb_entry->crb_ctrl.shl;
+			read_value >>= crb_entry->crb_ctrl.shr;
+			if (crb_entry->value_2)
+				read_value &= crb_entry->value_2;
+			read_value |= crb_entry->value_3;
+			read_value += crb_entry->value_1;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+		}
+		crb_addr += crb_entry->crb_strd.addr_stride;
+	}
+	return rval;
+}
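+
+/*
+ * Control entries are small interpreted programs: each of the op_count
+ * steps may combine WR/RW/AND/OR register writes, a bounded POLL, and
+ * RDSTATE/WRSTATE/MDSTATE transfers through the template's
+ * saved_state_array, with crb_addr advancing by addr_stride per step.
+ */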
+
+static void
+qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla82xx_md_entry_rdocm *ocm_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
+	r_addr = ocm_hdr->read_addr;
+	r_stride = ocm_hdr->read_addr_stride;
+	loop_cnt = ocm_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += r_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+	struct qla82xx_md_entry_mux *mux_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
+	r_addr = mux_hdr->read_addr;
+	s_addr = mux_hdr->select_addr;
+	s_stride = mux_hdr->select_value_stride;
+	s_value = mux_hdr->select_value;
+	loop_cnt = mux_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla82xx_md_rw_32(ha, s_addr, s_value, 1);
+		r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+		*data_ptr++ = cpu_to_le32(s_value);
+		*data_ptr++ = cpu_to_le32(r_value);
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla82xx_md_entry_crb *crb_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
+	r_addr = crb_hdr->addr;
+	r_stride = crb_hdr->crb_strd.addr_stride;
+	loop_cnt = crb_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+		*data_ptr++ = cpu_to_le32(r_addr);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += r_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static int
+qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	unsigned long p_wait, w_time, p_mask;
+	uint32_t c_value_w, c_value_r;
+	struct qla82xx_md_entry_cache *cache_hdr;
+	int rval = QLA_FUNCTION_FAILED;
+	uint32_t *data_ptr = *d_ptr;
+
+	cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+	p_wait = cache_hdr->cache_ctrl.poll_wait;
+	p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+	for (i = 0; i < loop_count; i++) {
+		qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
+		if (c_value_w)
+			qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+		if (p_mask) {
+			w_time = jiffies + p_wait;
+			do {
+				c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
+				if ((c_value_r & p_mask) == 0)
+					break;
+				else if (time_after_eq(jiffies, w_time)) {
+					/* capturing dump failed */
+					ql_dbg(ql_dbg_p3p, vha, 0xb032,
+					    "c_value_r: 0x%x, poll_mask: 0x%lx, "
+					    "w_time: 0x%lx\n",
+					    c_value_r, p_mask, w_time);
+					return rval;
+				}
+			} while (1);
+		}
+
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+			*data_ptr++ = cpu_to_le32(r_value);
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+static void
+qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	uint32_t c_value_w;
+	struct qla82xx_md_entry_cache *cache_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+	for (i = 0; i < loop_count; i++) {
+		qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
+		qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
+			*data_ptr++ = cpu_to_le32(r_value);
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t s_addr, r_addr;
+	uint32_t r_stride, r_value, r_cnt, qid = 0;
+	uint32_t i, k, loop_cnt;
+	struct qla82xx_md_entry_queue *q_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
+	s_addr = q_hdr->select_addr;
+	r_cnt = q_hdr->rd_strd.read_addr_cnt;
+	r_stride = q_hdr->rd_strd.read_addr_stride;
+	loop_cnt = q_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla82xx_md_rw_32(ha, s_addr, qid, 1);
+		r_addr = q_hdr->read_addr;
+		for (k = 0; k < r_cnt; k++) {
+			r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
+			*data_ptr++ = cpu_to_le32(r_value);
+			r_addr += r_stride;
+		}
+		qid += q_hdr->q_strd.queue_id_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static void
+qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t r_addr, r_value;
+	uint32_t i, loop_cnt;
+	struct qla82xx_md_entry_rdrom *rom_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
+	r_addr = rom_hdr->read_addr;
+	loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+		    (r_addr & 0xFFFF0000), 1);
+		r_value = qla82xx_md_rw_32(ha,
+		    MD_DIRECT_ROM_READ_BASE +
+		    (r_addr & 0x0000FFFF), 0, 0);
+		*data_ptr++ = cpu_to_le32(r_value);
+		r_addr += sizeof(uint32_t);
+	}
+	*d_ptr = data_ptr;
+}
+
+static int
+qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t r_addr, r_value, r_data;
+	uint32_t i, j, loop_cnt;
+	struct qla82xx_md_entry_rdmem *m_hdr;
+	unsigned long flags;
+	int rval = QLA_FUNCTION_FAILED;
+	uint32_t *data_ptr = *d_ptr;
+
+	m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
+	r_addr = m_hdr->read_addr;
+	loop_cnt = m_hdr->read_data_size/16;
+
+	if (r_addr & 0xf) {
+		ql_log(ql_log_warn, vha, 0xb033,
+		    "Read addr 0x%x not 16 bytes aligned\n", r_addr);
+		return rval;
+	}
+
+	if (m_hdr->read_data_size % 16) {
+		ql_log(ql_log_warn, vha, 0xb034,
+		    "Read data[0x%x] not multiple of 16 bytes\n",
+		    m_hdr->read_data_size);
+		return rval;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb035,
+	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+	for (i = 0; i < loop_cnt; i++) {
+		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+		r_value = 0;
+		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+		r_value = MIU_TA_CTL_ENABLE;
+		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+		r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+		qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			r_value = qla82xx_md_rw_32(ha,
+			    MD_MIU_TEST_AGT_CTRL, 0, 0);
+			if ((r_value & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			printk_ratelimited(KERN_ERR
+			    "failed to read through agent\n");
+			write_unlock_irqrestore(&ha->hw_lock, flags);
+			return rval;
+		}
+
+		for (j = 0; j < 4; j++) {
+			r_data = qla82xx_md_rw_32(ha,
+			    MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
+			*data_ptr++ = cpu_to_le32(r_data);
+		}
+		r_addr += 16;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+int
+qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint64_t chksum = 0;
+	uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
+	int count = ha->md_template_size/sizeof(uint32_t);
+
+	while (count-- > 0)
+		chksum += *d_ptr++;
+	while (chksum >> 32)
+		chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
+	return ~chksum;
+}
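+
+/*
+ * The template checksum is a one's-complement sum: dwords accumulate
+ * into 64 bits and the carries are folded back into the low word.  A
+ * valid template is laid out so the folded sum is 0xffffffff, making
+ * the (truncated) return value 0; qla82xx_md_collect() rejects any
+ * other result.
+ */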
+
+static void
+qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
+	qla82xx_md_entry_hdr_t *entry_hdr, int index)
+{
+	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+	ql_dbg(ql_dbg_p3p, vha, 0xb036,
+	    "Skipping entry[%d]: "
+	    "ETYPE[0x%x]-ELEVEL[0x%x]\n",
+	    index, entry_hdr->entry_type,
+	    entry_hdr->d_ctrl.entry_capture_mask);
+}
+
+int
+qla82xx_md_collect(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int no_entry_hdr = 0;
+	qla82xx_md_entry_hdr_t *entry_hdr;
+	struct qla82xx_md_template_hdr *tmplt_hdr;
+	uint32_t *data_ptr;
+	uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
+	int i = 0, rval = QLA_FUNCTION_FAILED;
+
+	tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+	data_ptr = (uint32_t *)ha->md_dump;
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xb037,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n", ha->fw_dump);
+		goto md_failed;
+	}
+
+	ha->fw_dumped = 0;
+
+	if (!ha->md_tmplt_hdr || !ha->md_dump) {
+		ql_log(ql_log_warn, vha, 0xb038,
+		    "Memory not allocated for minidump capture\n");
+		goto md_failed;
+	}
+
+	if (ha->flags.isp82xx_no_md_cap) {
+		ql_log(ql_log_warn, vha, 0xb054,
+		    "Forced reset from application, "
+		    "ignore minidump capture\n");
+		ha->flags.isp82xx_no_md_cap = 0;
+		goto md_failed;
+	}
+
+	if (qla82xx_validate_template_chksum(vha)) {
+		ql_log(ql_log_info, vha, 0xb039,
+		    "Template checksum validation error\n");
+		goto md_failed;
+	}
+
+	no_entry_hdr = tmplt_hdr->num_of_entries;
+	ql_dbg(ql_dbg_p3p, vha, 0xb03a,
+	    "No of entry headers in Template: 0x%x\n", no_entry_hdr);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb03b,
+	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+	/* Validate whether required debug level is set */
+	if ((f_capture_mask & 0x3) != 0x3) {
+		ql_log(ql_log_warn, vha, 0xb03c,
+		    "Minimum required capture mask[0x%x] level not set\n",
+		    f_capture_mask);
+		goto md_failed;
+	}
+	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+
+	tmplt_hdr->driver_info[0] = vha->host_no;
+	tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
+	    (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
+	    QLA_DRIVER_BETA_VER;
+
+	total_data_size = ha->md_dump_size;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb03d,
+	    "Total minidump data_size 0x%x to be captured\n", total_data_size);
+
+	/* Check whether template obtained is valid */
+	if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
+		ql_log(ql_log_warn, vha, 0xb04e,
+		    "Bad template header entry type: 0x%x obtained\n",
+		    tmplt_hdr->entry_type);
+		goto md_failed;
+	}
+
+	entry_hdr = (qla82xx_md_entry_hdr_t *)
+	    (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+
+	/* Walk through the entry headers */
+	for (i = 0; i < no_entry_hdr; i++) {
+
+		if (data_collected > total_data_size) {
+			ql_log(ql_log_warn, vha, 0xb03e,
+			    "More MiniDump data collected: [0x%x]\n",
+			    data_collected);
+			goto md_failed;
+		}
+
+		if (!(entry_hdr->d_ctrl.entry_capture_mask &
+		    ql2xmdcapmask)) {
+			entry_hdr->d_ctrl.driver_flags |=
+			    QLA82XX_DBG_SKIPPED_FLAG;
+			ql_dbg(ql_dbg_p3p, vha, 0xb03f,
+			    "Skipping entry[%d]: "
+			    "ETYPE[0x%x]-ELEVEL[0x%x]\n",
+			    i, entry_hdr->entry_type,
+			    entry_hdr->d_ctrl.entry_capture_mask);
+			goto skip_nxt_entry;
+		}
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb040,
+		    "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
+		    "entry_type: 0x%x, capture_mask: 0x%x\n",
+		    __func__, i, data_ptr, entry_hdr,
+		    entry_hdr->entry_type,
+		    entry_hdr->d_ctrl.entry_capture_mask);
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb041,
+		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
+		    data_collected, (ha->md_dump_size - data_collected));
+
+		/*
+		 * Decode the entry type and take the action required to
+		 * capture the debug data.
+		 */
+		switch (entry_hdr->entry_type) {
+		case QLA82XX_RDEND:
+			qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA82XX_CNTRL:
+			rval = qla82xx_minidump_process_control(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_RDCRB:
+			qla82xx_minidump_process_rdcrb(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDMEM:
+			rval = qla82xx_minidump_process_rdmem(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_BOARD:
+		case QLA82XX_RDROM:
+			qla82xx_minidump_process_rdrom(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_L2DTG:
+		case QLA82XX_L2ITG:
+		case QLA82XX_L2DAT:
+		case QLA82XX_L2INS:
+			rval = qla82xx_minidump_process_l2tag(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_L1DAT:
+		case QLA82XX_L1INS:
+			qla82xx_minidump_process_l1cache(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDOCM:
+			qla82xx_minidump_process_rdocm(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDMUX:
+			qla82xx_minidump_process_rdmux(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_QUEUE:
+			qla82xx_minidump_process_queue(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDNOP:
+		default:
+			qla82xx_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		}
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb042,
+		    "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
+
+		data_collected = (uint8_t *)data_ptr -
+		    (uint8_t *)ha->md_dump;
+skip_nxt_entry:
+		entry_hdr = (qla82xx_md_entry_hdr_t *)
+		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+	}
+
+	if (data_collected != total_data_size) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb043,
+		    "MiniDump data mismatch: Data collected: [0x%x],"
+		    "total_data_size:[0x%x]\n",
+		    data_collected, total_data_size);
+		goto md_failed;
+	}
+
+	ql_log(ql_log_info, vha, 0xb044,
+	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+	ha->fw_dumped = 1;
+	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+md_failed:
+	return rval;
+}
+
+int
+qla82xx_md_alloc(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int i, k;
+	struct qla82xx_md_template_hdr *tmplt_hdr;
+
+	tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
+
+	if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
+		ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
+		ql_log(ql_log_info, vha, 0xb045,
+		    "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
+		    ql2xmdcapmask);
+	}
+
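+	/*
+	 * Walk the capture levels starting at bit 0x2: each level enabled
+	 * in ql2xmdcapmask contributes its size from the template's
+	 * capture_size_array to the total minidump allocation.
+	 */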
+	for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
+		if (i & ql2xmdcapmask)
+			ha->md_dump_size += tmplt_hdr->capture_size_array[k];
+	}
+
+	if (ha->md_dump) {
+		ql_log(ql_log_warn, vha, 0xb046,
+		    "Firmware dump previously allocated.\n");
+		return 1;
+	}
+
+	ha->md_dump = vmalloc(ha->md_dump_size);
+	if (ha->md_dump == NULL) {
+		ql_log(ql_log_warn, vha, 0xb047,
+		    "Unable to allocate memory for Minidump size "
+		    "(0x%x).\n", ha->md_dump_size);
+		return 1;
+	}
+	return 0;
+}
+
+void
+qla82xx_md_free(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Release the template header allocated */
+	if (ha->md_tmplt_hdr) {
+		ql_log(ql_log_info, vha, 0xb048,
+		    "Free MiniDump template: %p, size (%d KB)\n",
+		    ha->md_tmplt_hdr, ha->md_template_size / 1024);
+		dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
+		    ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
+		ha->md_tmplt_hdr = NULL;
+	}
+
+	/* Release the template data buffer allocated */
+	if (ha->md_dump) {
+		ql_log(ql_log_info, vha, 0xb049,
+		    "Free MiniDump memory: %p, size (%d KB)\n",
+		    ha->md_dump, ha->md_dump_size / 1024);
+		vfree(ha->md_dump);
+		ha->md_dump_size = 0;
+		ha->md_dump = NULL;
+	}
+}
+
+void
+qla82xx_md_prep(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval;
+
+	/* Get Minidump template size */
+	rval = qla82xx_md_get_template_size(vha);
+	if (rval == QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0xb04a,
+		    "MiniDump Template size obtained (%d KB)\n",
+		    ha->md_template_size / 1024);
+
+		/* Get Minidump template */
+		if (IS_QLA8044(ha))
+			rval = qla8044_md_get_template(vha);
+		else
+			rval = qla82xx_md_get_template(vha);
+
+		if (rval == QLA_SUCCESS) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb04b,
+			    "MiniDump Template obtained\n");
+
+			/* Allocate memory for minidump */
+			rval = qla82xx_md_alloc(vha);
+			if (rval == QLA_SUCCESS) {
+				ql_log(ql_log_info, vha, 0xb04c,
+				    "MiniDump memory allocated (%d KB)\n",
+				    ha->md_dump_size / 1024);
+			} else {
+				ql_log(ql_log_info, vha, 0xb04d,
+				    "Free MiniDump template: %p, size: (%d KB)\n",
+				    ha->md_tmplt_hdr,
+				    ha->md_template_size / 1024);
+				dma_free_coherent(&ha->pdev->dev,
+				    ha->md_template_size,
+				    ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
+				ha->md_tmplt_hdr = NULL;
+			}
+		}
+	}
+}
+
+int
+qla82xx_beacon_on(struct scsi_qla_host *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla82xx_idc_lock(ha);
+	rval = qla82xx_mbx_beacon_ctl(vha, 1);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0xb050,
+		    "mbx set led config failed in %s\n", __func__);
+		goto exit;
+	}
+	ha->beacon_blink_led = 1;
+exit:
+	qla82xx_idc_unlock(ha);
+	return rval;
+}
+
+int
+qla82xx_beacon_off(struct scsi_qla_host *vha)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla82xx_idc_lock(ha);
+	rval = qla82xx_mbx_beacon_ctl(vha, 0);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0xb051,
+		    "mbx set led config failed in %s\n", __func__);
+		goto exit;
+	}
+	ha->beacon_blink_led = 0;
+exit:
+	qla82xx_idc_unlock(ha);
+	return rval;
+}
+
+void
+qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->allow_cna_fw_dump)
+		return;
+
+	scsi_block_requests(vha->host);
+	ha->flags.isp82xx_no_md_cap = 1;
+	qla82xx_idc_lock(ha);
+	qla82xx_set_reset_owner(vha);
+	qla82xx_idc_unlock(ha);
+	qla2x00_wait_for_chip_reset(vha);
+	scsi_unblock_requests(vha->host);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx.h
new file mode 100644
index 0000000..71a4109
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx.h
@@ -0,0 +1,1187 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NX_H
+#define __QLA_NX_H
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED	      0xffff
+#define PHAN_INITIALIZE_COMPLETE      0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK	      0xf00f
+#define PHAN_PEG_RCV_INITIALIZED      0xff01
+
+/*CRB_RELATED*/
+#define QLA82XX_CRB_BASE	QLA82XX_CAM_RAM(0x200)
+#define QLA82XX_REG(X)		(QLA82XX_CRB_BASE+(X))
+
+#define CRB_CMDPEG_STATE		QLA82XX_REG(0x50)
+#define CRB_RCVPEG_STATE		QLA82XX_REG(0x13c)
+#define BOOT_LOADER_DIMM_STATUS		QLA82XX_REG(0x54)
+#define CRB_DMA_SHIFT			QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE			QLA82XX_REG(0x1b4)
+#define QLA82XX_DMA_SHIFT_VALUE		0x55555555
+
+#define QLA82XX_HW_H0_CH_HUB_ADR    0x05
+#define QLA82XX_HW_H1_CH_HUB_ADR    0x0E
+#define QLA82XX_HW_H2_CH_HUB_ADR    0x03
+#define QLA82XX_HW_H3_CH_HUB_ADR    0x01
+#define QLA82XX_HW_H4_CH_HUB_ADR    0x06
+#define QLA82XX_HW_H5_CH_HUB_ADR    0x07
+#define QLA82XX_HW_H6_CH_HUB_ADR    0x08
+
+/*  Hub 0 */
+#define QLA82XX_HW_MN_CRB_AGT_ADR   0x15
+#define QLA82XX_HW_MS_CRB_AGT_ADR   0x25
+
+/*  Hub 1 */
+#define QLA82XX_HW_PS_CRB_AGT_ADR	0x73
+#define QLA82XX_HW_QMS_CRB_AGT_ADR	0x00
+#define QLA82XX_HW_RPMX3_CRB_AGT_ADR	0x0b
+#define QLA82XX_HW_SQGS0_CRB_AGT_ADR	0x01
+#define QLA82XX_HW_SQGS1_CRB_AGT_ADR	0x02
+#define QLA82XX_HW_SQGS2_CRB_AGT_ADR	0x03
+#define QLA82XX_HW_SQGS3_CRB_AGT_ADR	0x04
+#define QLA82XX_HW_C2C0_CRB_AGT_ADR	0x58
+#define QLA82XX_HW_C2C1_CRB_AGT_ADR	0x59
+#define QLA82XX_HW_C2C2_CRB_AGT_ADR	0x5a
+#define QLA82XX_HW_RPMX2_CRB_AGT_ADR	0x0a
+#define QLA82XX_HW_RPMX4_CRB_AGT_ADR	0x0c
+#define QLA82XX_HW_RPMX7_CRB_AGT_ADR	0x0f
+#define QLA82XX_HW_RPMX9_CRB_AGT_ADR	0x12
+#define QLA82XX_HW_SMB_CRB_AGT_ADR	0x18
+
+/*  Hub 2 */
+#define QLA82XX_HW_NIU_CRB_AGT_ADR	0x31
+#define QLA82XX_HW_I2C0_CRB_AGT_ADR	0x19
+#define QLA82XX_HW_I2C1_CRB_AGT_ADR	0x29
+
+#define QLA82XX_HW_SN_CRB_AGT_ADR	0x10
+#define QLA82XX_HW_I2Q_CRB_AGT_ADR	0x20
+#define QLA82XX_HW_LPC_CRB_AGT_ADR	0x22
+#define QLA82XX_HW_ROMUSB_CRB_AGT_ADR	0x21
+#define QLA82XX_HW_QM_CRB_AGT_ADR	0x66
+#define QLA82XX_HW_SQG0_CRB_AGT_ADR	0x60
+#define QLA82XX_HW_SQG1_CRB_AGT_ADR	0x61
+#define QLA82XX_HW_SQG2_CRB_AGT_ADR	0x62
+#define QLA82XX_HW_SQG3_CRB_AGT_ADR	0x63
+#define QLA82XX_HW_RPMX1_CRB_AGT_ADR	0x09
+#define QLA82XX_HW_RPMX5_CRB_AGT_ADR	0x0d
+#define QLA82XX_HW_RPMX6_CRB_AGT_ADR	0x0e
+#define QLA82XX_HW_RPMX8_CRB_AGT_ADR	0x11
+
+/*  Hub 3 */
+#define QLA82XX_HW_PH_CRB_AGT_ADR	0x1A
+#define QLA82XX_HW_SRE_CRB_AGT_ADR	0x50
+#define QLA82XX_HW_EG_CRB_AGT_ADR	0x51
+#define QLA82XX_HW_RPMX0_CRB_AGT_ADR	0x08
+
+/*  Hub 4 */
+#define QLA82XX_HW_PEGN0_CRB_AGT_ADR	0x40
+#define QLA82XX_HW_PEGN1_CRB_AGT_ADR	0x41
+#define QLA82XX_HW_PEGN2_CRB_AGT_ADR	0x42
+#define QLA82XX_HW_PEGN3_CRB_AGT_ADR	0x43
+#define QLA82XX_HW_PEGNI_CRB_AGT_ADR	0x44
+#define QLA82XX_HW_PEGND_CRB_AGT_ADR	0x45
+#define QLA82XX_HW_PEGNC_CRB_AGT_ADR	0x46
+#define QLA82XX_HW_PEGR0_CRB_AGT_ADR	0x47
+#define QLA82XX_HW_PEGR1_CRB_AGT_ADR	0x48
+#define QLA82XX_HW_PEGR2_CRB_AGT_ADR	0x49
+#define QLA82XX_HW_PEGR3_CRB_AGT_ADR	0x4a
+#define QLA82XX_HW_PEGN4_CRB_AGT_ADR	0x4b
+
+/*  Hub 5 */
+#define QLA82XX_HW_PEGS0_CRB_AGT_ADR	0x40
+#define QLA82XX_HW_PEGS1_CRB_AGT_ADR	0x41
+#define QLA82XX_HW_PEGS2_CRB_AGT_ADR	0x42
+#define QLA82XX_HW_PEGS3_CRB_AGT_ADR	0x43
+#define QLA82XX_HW_PEGSI_CRB_AGT_ADR	0x44
+#define QLA82XX_HW_PEGSD_CRB_AGT_ADR	0x45
+#define QLA82XX_HW_PEGSC_CRB_AGT_ADR	0x46
+
+/*  Hub 6 */
+#define QLA82XX_HW_CAS0_CRB_AGT_ADR	0x46
+#define QLA82XX_HW_CAS1_CRB_AGT_ADR	0x47
+#define QLA82XX_HW_CAS2_CRB_AGT_ADR	0x48
+#define QLA82XX_HW_CAS3_CRB_AGT_ADR	0x49
+#define QLA82XX_HW_NCM_CRB_AGT_ADR	0x16
+#define QLA82XX_HW_TMR_CRB_AGT_ADR	0x17
+#define QLA82XX_HW_XDMA_CRB_AGT_ADR	0x05
+#define QLA82XX_HW_OCM0_CRB_AGT_ADR	0x06
+#define QLA82XX_HW_OCM1_CRB_AGT_ADR	0x07
+
+/*  This field defines PCI/X adr [25:20] of agents on the CRB */
+#define QLA82XX_HW_PX_MAP_CRB_PH	0
+#define QLA82XX_HW_PX_MAP_CRB_PS	1
+#define QLA82XX_HW_PX_MAP_CRB_MN	2
+#define QLA82XX_HW_PX_MAP_CRB_MS	3
+#define QLA82XX_HW_PX_MAP_CRB_SRE	5
+#define QLA82XX_HW_PX_MAP_CRB_NIU	6
+#define QLA82XX_HW_PX_MAP_CRB_QMN	7
+#define QLA82XX_HW_PX_MAP_CRB_SQN0	8
+#define QLA82XX_HW_PX_MAP_CRB_SQN1	9
+#define QLA82XX_HW_PX_MAP_CRB_SQN2	10
+#define QLA82XX_HW_PX_MAP_CRB_SQN3	11
+#define QLA82XX_HW_PX_MAP_CRB_QMS	12
+#define QLA82XX_HW_PX_MAP_CRB_SQS0	13
+#define QLA82XX_HW_PX_MAP_CRB_SQS1	14
+#define QLA82XX_HW_PX_MAP_CRB_SQS2	15
+#define QLA82XX_HW_PX_MAP_CRB_SQS3	16
+#define QLA82XX_HW_PX_MAP_CRB_PGN0	17
+#define QLA82XX_HW_PX_MAP_CRB_PGN1	18
+#define QLA82XX_HW_PX_MAP_CRB_PGN2	19
+#define QLA82XX_HW_PX_MAP_CRB_PGN3	20
+#define QLA82XX_HW_PX_MAP_CRB_PGN4	QLA82XX_HW_PX_MAP_CRB_SQS2
+#define QLA82XX_HW_PX_MAP_CRB_PGND	21
+#define QLA82XX_HW_PX_MAP_CRB_PGNI	22
+#define QLA82XX_HW_PX_MAP_CRB_PGS0	23
+#define QLA82XX_HW_PX_MAP_CRB_PGS1	24
+#define QLA82XX_HW_PX_MAP_CRB_PGS2	25
+#define QLA82XX_HW_PX_MAP_CRB_PGS3	26
+#define QLA82XX_HW_PX_MAP_CRB_PGSD	27
+#define QLA82XX_HW_PX_MAP_CRB_PGSI	28
+#define QLA82XX_HW_PX_MAP_CRB_SN	29
+#define QLA82XX_HW_PX_MAP_CRB_EG	31
+#define QLA82XX_HW_PX_MAP_CRB_PH2	32
+#define QLA82XX_HW_PX_MAP_CRB_PS2	33
+#define QLA82XX_HW_PX_MAP_CRB_CAM	34
+#define QLA82XX_HW_PX_MAP_CRB_CAS0	35
+#define QLA82XX_HW_PX_MAP_CRB_CAS1	36
+#define QLA82XX_HW_PX_MAP_CRB_CAS2	37
+#define QLA82XX_HW_PX_MAP_CRB_C2C0	38
+#define QLA82XX_HW_PX_MAP_CRB_C2C1	39
+#define QLA82XX_HW_PX_MAP_CRB_TIMR	40
+#define QLA82XX_HW_PX_MAP_CRB_RPMX1	42
+#define QLA82XX_HW_PX_MAP_CRB_RPMX2	43
+#define QLA82XX_HW_PX_MAP_CRB_RPMX3	44
+#define QLA82XX_HW_PX_MAP_CRB_RPMX4	45
+#define QLA82XX_HW_PX_MAP_CRB_RPMX5	46
+#define QLA82XX_HW_PX_MAP_CRB_RPMX6	47
+#define QLA82XX_HW_PX_MAP_CRB_RPMX7	48
+#define QLA82XX_HW_PX_MAP_CRB_XDMA	49
+#define QLA82XX_HW_PX_MAP_CRB_I2Q	50
+#define QLA82XX_HW_PX_MAP_CRB_ROMUSB	51
+#define QLA82XX_HW_PX_MAP_CRB_CAS3	52
+#define QLA82XX_HW_PX_MAP_CRB_RPMX0	53
+#define QLA82XX_HW_PX_MAP_CRB_RPMX8	54
+#define QLA82XX_HW_PX_MAP_CRB_RPMX9	55
+#define QLA82XX_HW_PX_MAP_CRB_OCM0	56
+#define QLA82XX_HW_PX_MAP_CRB_OCM1	57
+#define QLA82XX_HW_PX_MAP_CRB_SMB	58
+#define QLA82XX_HW_PX_MAP_CRB_I2C0	59
+#define QLA82XX_HW_PX_MAP_CRB_I2C1	60
+#define QLA82XX_HW_PX_MAP_CRB_LPC	61
+#define QLA82XX_HW_PX_MAP_CRB_PGNC	62
+#define QLA82XX_HW_PX_MAP_CRB_PGR0	63
+#define QLA82XX_HW_PX_MAP_CRB_PGR1	4
+#define QLA82XX_HW_PX_MAP_CRB_PGR2	30
+#define QLA82XX_HW_PX_MAP_CRB_PGR3	41
+
+/*  This field defines CRB adr [31:20] of the agents */
+
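+/*
+ * For illustration: the MN agent sits behind hub 0, so the composed CRB
+ * hub/agent address below, QLA82XX_HW_CRB_HUB_AGT_ADR_MN, expands to
+ * (0x05 << 7) | 0x15 = 0x295.
+ */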
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MN	    ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_MN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PH	    ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PH_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_MS	    ((QLA82XX_HW_H0_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_MS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PS	    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SS	    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMS	    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_QMS_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS0     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS1     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS2     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQS3     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C0     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_C2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_C2C1     ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_C2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX7_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX9_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SMB	    ((QLA82XX_HW_H1_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SMB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_NIU	    ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_NIU_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0     ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_I2C0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1     ((QLA82XX_HW_H2_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_I2C1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SRE	    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SRE_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_EG	    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_EG_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_QMN	    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_QM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQG0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQG1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQG2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SQG3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX5_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX6_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8    ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_RPMX8_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS0     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_CAS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS1     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_CAS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS2     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_CAS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAS3     ((QLA82XX_HW_H3_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_CAS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGNI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGND     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGND_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGN0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGN1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGN2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGN3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4	   ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGN4_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGNC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR0     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGR0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR1     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGR1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR2     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGR2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGR3     ((QLA82XX_HW_H4_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGR3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGSI_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSD     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGSD_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGS0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGS1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGS2_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGS3_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_PGSC     ((QLA82XX_HW_H5_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_PEGSC_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_CAM	    ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_NCM_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_TMR_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_XDMA_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_SN	    ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_SN_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q	    ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_I2Q_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB   ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_ROMUSB_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_OCM0_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_OCM1     ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_OCM1_CRB_AGT_ADR)
+#define QLA82XX_HW_CRB_HUB_AGT_ADR_LPC	    ((QLA82XX_HW_H6_CH_HUB_ADR << 7) | \
+	QLA82XX_HW_LPC_CRB_AGT_ADR)
+
+#define ROMUSB_GLB				(QLA82XX_CRB_ROMUSB + 0x00000)
+#define QLA82XX_ROMUSB_GLB_STATUS		(ROMUSB_GLB + 0x0004)
+#define QLA82XX_ROMUSB_GLB_SW_RESET		(ROMUSB_GLB + 0x0008)
+#define QLA82XX_ROMUSB_GLB_CAS_RST		(ROMUSB_GLB + 0x0038)
+#define QLA82XX_ROMUSB_GLB_PEGTUNE_DONE		(ROMUSB_GLB + 0x005c)
+
+#define ROMUSB_ROM				(QLA82XX_CRB_ROMUSB + 0x10000)
+#define QLA82XX_ROMUSB_ROM_INSTR_OPCODE		(ROMUSB_ROM + 0x0004)
+#define QLA82XX_ROMUSB_ROM_ADDRESS		(ROMUSB_ROM + 0x0008)
+#define QLA82XX_ROMUSB_ROM_WDATA		(ROMUSB_ROM + 0x000c)
+#define QLA82XX_ROMUSB_ROM_ABYTE_CNT		(ROMUSB_ROM + 0x0010)
+#define QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT	(ROMUSB_ROM + 0x0014)
+#define QLA82XX_ROMUSB_ROM_RDATA		(ROMUSB_ROM + 0x0018)
+
+#define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000	 /* all are 1MB windows */
+#define QLA82XX_PCI_CRB_WINDOW(A) \
+	(QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
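+/*
+ * For illustration: with QLA82XX_PCI_CRBSPACE = 0x06000000 (defined below),
+ * each window index selects one 1 MB slice, e.g. window 7 (QMN) spans
+ * 0x06700000 - 0x067fffff.
+ */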
+#define QLA82XX_CRB_C2C_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C0)
+#define QLA82XX_CRB_C2C_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C1)
+#define QLA82XX_CRB_C2C_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_C2C2)
+#define QLA82XX_CRB_CAM \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAM)
+#define QLA82XX_CRB_CASPER \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS)
+#define QLA82XX_CRB_CASPER_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS0)
+#define QLA82XX_CRB_CASPER_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS1)
+#define QLA82XX_CRB_CASPER_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_CAS2)
+#define QLA82XX_CRB_DDR_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MS)
+#define QLA82XX_CRB_DDR_NET \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_MN)
+#define QLA82XX_CRB_EPG \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_EG)
+#define QLA82XX_CRB_I2Q \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2Q)
+#define QLA82XX_CRB_NIU \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_NIU)
+
+#define QLA82XX_CRB_PCIX_HOST \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH)
+#define QLA82XX_CRB_PCIX_HOST2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PH2)
+#define QLA82XX_CRB_PCIX_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS)
+#define QLA82XX_CRB_PCIE \
+	QLA82XX_CRB_PCIX_MD
+
+/* window 1 pcie slot */
+#define QLA82XX_CRB_PCIE2	 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PS2)
+#define QLA82XX_CRB_PEG_MD_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS0)
+#define QLA82XX_CRB_PEG_MD_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS1)
+#define QLA82XX_CRB_PEG_MD_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS2)
+#define QLA82XX_CRB_PEG_MD_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGS3)
+#define QLA82XX_CRB_PEG_MD_D \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSD)
+#define QLA82XX_CRB_PEG_MD_I \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGSI)
+#define QLA82XX_CRB_PEG_NET_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN0)
+#define QLA82XX_CRB_PEG_NET_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN1)
+#define QLA82XX_CRB_PEG_NET_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN2)
+#define QLA82XX_CRB_PEG_NET_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN3)
+#define QLA82XX_CRB_PEG_NET_4 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGN4)
+#define QLA82XX_CRB_PEG_NET_D \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGND)
+#define QLA82XX_CRB_PEG_NET_I \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_PGNI)
+#define QLA82XX_CRB_PQM_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMS)
+#define QLA82XX_CRB_PQM_NET \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_QMN)
+#define QLA82XX_CRB_QDR_MD \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SS)
+#define QLA82XX_CRB_QDR_NET \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SN)
+#define QLA82XX_CRB_ROMUSB \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_ROMUSB)
+#define QLA82XX_CRB_RPMX_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX0)
+#define QLA82XX_CRB_RPMX_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX1)
+#define QLA82XX_CRB_RPMX_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX2)
+#define QLA82XX_CRB_RPMX_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX3)
+#define QLA82XX_CRB_RPMX_4 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX4)
+#define QLA82XX_CRB_RPMX_5 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX5)
+#define QLA82XX_CRB_RPMX_6 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX6)
+#define QLA82XX_CRB_RPMX_7 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_RPMX7)
+#define QLA82XX_CRB_SQM_MD_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS0)
+#define QLA82XX_CRB_SQM_MD_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS1)
+#define QLA82XX_CRB_SQM_MD_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS2)
+#define QLA82XX_CRB_SQM_MD_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQS3)
+#define QLA82XX_CRB_SQM_NET_0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN0)
+#define QLA82XX_CRB_SQM_NET_1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN1)
+#define QLA82XX_CRB_SQM_NET_2 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN2)
+#define QLA82XX_CRB_SQM_NET_3 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SQN3)
+#define QLA82XX_CRB_SRE \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SRE)
+#define QLA82XX_CRB_TIMER \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_TIMR)
+#define QLA82XX_CRB_XDMA \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_XDMA)
+#define QLA82XX_CRB_I2C0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C0)
+#define QLA82XX_CRB_I2C1 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_I2C1)
+#define QLA82XX_CRB_OCM0 \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_OCM0)
+#define QLA82XX_CRB_SMB \
+	QLA82XX_PCI_CRB_WINDOW(QLA82XX_HW_PX_MAP_CRB_SMB)
+#define QLA82XX_CRB_MAX \
+	QLA82XX_PCI_CRB_WINDOW(64)
+
+/*
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ * Base addresses of major components on-chip.
+ * ====================== BASE ADDRESSES ON-CHIP ======================
+ */
+#define QLA82XX_ADDR_DDR_NET		(0x0000000000000000ULL)
+#define QLA82XX_ADDR_DDR_NET_MAX	(0x000000000fffffffULL)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA82XX_P2_ADDR_PCIE		(0x0000000800000000ULL)
+#define QLA82XX_P3_ADDR_PCIE		(0x0000008000000000ULL)
+#define QLA82XX_ADDR_PCIE_MAX		(0x0000000FFFFFFFFFULL)
+#define QLA82XX_ADDR_OCM0		(0x0000000200000000ULL)
+#define QLA82XX_ADDR_OCM0_MAX		(0x00000002000fffffULL)
+#define QLA82XX_ADDR_OCM1		(0x0000000200400000ULL)
+#define QLA82XX_ADDR_OCM1_MAX		(0x00000002004fffffULL)
+#define QLA82XX_ADDR_QDR_NET		(0x0000000300000000ULL)
+#define QLA82XX_P3_ADDR_QDR_NET_MAX	(0x0000000303ffffffULL)
+
+#define QLA82XX_PCI_CRBSPACE		(unsigned long)0x06000000
+#define QLA82XX_PCI_DIRECT_CRB		(unsigned long)0x04400000
+#define QLA82XX_PCI_CAMQM		(unsigned long)0x04800000
+#define QLA82XX_PCI_CAMQM_MAX		(unsigned long)0x04ffffff
+#define QLA82XX_PCI_DDR_NET		(unsigned long)0x00000000
+#define QLA82XX_PCI_QDR_NET		(unsigned long)0x04000000
+#define QLA82XX_PCI_QDR_NET_MAX		(unsigned long)0x043fffff
+
+/*
+ *   Register offsets for MN
+ */
+#define MIU_CONTROL			(0x000)
+#define MIU_TAG				(0x004)
+#define MIU_TEST_AGT_CTRL		(0x090)
+#define MIU_TEST_AGT_ADDR_LO		(0x094)
+#define MIU_TEST_AGT_ADDR_HI		(0x098)
+#define MIU_TEST_AGT_WRDATA_LO		(0x0a0)
+#define MIU_TEST_AGT_WRDATA_HI		(0x0a4)
+#define MIU_TEST_AGT_WRDATA(i)		(0x0a0+(4*(i)))
+#define MIU_TEST_AGT_RDDATA_LO		(0x0a8)
+#define MIU_TEST_AGT_RDDATA_HI		(0x0ac)
+#define MIU_TEST_AGT_RDDATA(i)		(0x0a8+(4*(i)))
+#define MIU_TEST_AGT_ADDR_MASK		0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off)	(0)
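+/*
+ * For illustration: the indexed forms alias the _LO/_HI names above, e.g.
+ * MIU_TEST_AGT_RDDATA(0) = 0x0a8 = MIU_TEST_AGT_RDDATA_LO and
+ * MIU_TEST_AGT_RDDATA(1) = 0x0ac = MIU_TEST_AGT_RDDATA_HI.
+ */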
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_START	1
+#define MIU_TA_CTL_ENABLE	2
+#define MIU_TA_CTL_WRITE	4
+#define MIU_TA_CTL_BUSY		8
+
+/*CAM RAM */
+# define QLA82XX_CAM_RAM_BASE		(QLA82XX_CRB_CAM + 0x02000)
+# define QLA82XX_CAM_RAM(reg)		(QLA82XX_CAM_RAM_BASE + (reg))
+
+#define QLA82XX_PORT_MODE_ADDR		(QLA82XX_CAM_RAM(0x24))
+#define QLA82XX_PEG_HALT_STATUS1	(QLA82XX_CAM_RAM(0xa8))
+#define QLA82XX_PEG_HALT_STATUS2	(QLA82XX_CAM_RAM(0xac))
+#define QLA82XX_PEG_ALIVE_COUNTER	(QLA82XX_CAM_RAM(0xb0))
+
+#define QLA82XX_CAMRAM_DB1		(QLA82XX_CAM_RAM(0x1b8))
+#define QLA82XX_CAMRAM_DB2		(QLA82XX_CAM_RAM(0x1bc))
+
+#define HALT_STATUS_UNRECOVERABLE	0x80000000
+#define HALT_STATUS_RECOVERABLE		0x40000000
+
+/* Driver Coexistence Defines */
+#define QLA82XX_CRB_DRV_ACTIVE	     (QLA82XX_CAM_RAM(0x138))
+#define QLA82XX_CRB_DEV_STATE	     (QLA82XX_CAM_RAM(0x140))
+#define QLA82XX_CRB_DRV_STATE	     (QLA82XX_CAM_RAM(0x144))
+#define QLA82XX_CRB_DRV_SCRATCH      (QLA82XX_CAM_RAM(0x148))
+#define QLA82XX_CRB_DEV_PART_INFO    (QLA82XX_CAM_RAM(0x14c))
+#define QLA82XX_CRB_DRV_IDC_VERSION  (QLA82XX_CAM_RAM(0x174))
+
+/* Every driver should use these Device State */
+#define QLA8XXX_DEV_COLD		1
+#define QLA8XXX_DEV_INITIALIZING	2
+#define QLA8XXX_DEV_READY		3
+#define QLA8XXX_DEV_NEED_RESET		4
+#define QLA8XXX_DEV_NEED_QUIESCENT	5
+#define QLA8XXX_DEV_FAILED		6
+#define QLA8XXX_DEV_QUIESCENT		7
+#define	MAX_STATES			8 /* Increment if new state added */
+#define QLA8XXX_BAD_VALUE		0xbad0bad0
+
+#define QLA82XX_IDC_VERSION			1
+#define QLA82XX_ROM_DEV_INIT_TIMEOUT		30
+#define QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT	10
+
+#define QLA82XX_ROM_LOCK_ID		(QLA82XX_CAM_RAM(0x100))
+#define QLA82XX_CRB_WIN_LOCK_ID		(QLA82XX_CAM_RAM(0x124))
+#define QLA82XX_FW_VERSION_MAJOR	(QLA82XX_CAM_RAM(0x150))
+#define QLA82XX_FW_VERSION_MINOR	(QLA82XX_CAM_RAM(0x154))
+#define QLA82XX_FW_VERSION_SUB		(QLA82XX_CAM_RAM(0x158))
+#define QLA82XX_PCIE_REG(reg)		(QLA82XX_CRB_PCIE + (reg))
+
+#define PCIE_SETUP_FUNCTION		(0x12040)
+#define PCIE_SETUP_FUNCTION2		(0x12048)
+
+#define QLA82XX_PCIX_PS_REG(reg)	(QLA82XX_CRB_PCIX_MD + (reg))
+#define QLA82XX_PCIX_PS2_REG(reg)	(QLA82XX_CRB_PCIE2 + (reg))
+
+#define PCIE_SEM2_LOCK	     (0x1c010)	/* Flash lock	*/
+#define PCIE_SEM2_UNLOCK     (0x1c014)	/* Flash unlock */
+#define PCIE_SEM5_LOCK	     (0x1c028)	/* Coexistence lock   */
+#define PCIE_SEM5_UNLOCK     (0x1c02c)	/* Coexistence unlock */
+#define PCIE_SEM7_LOCK	     (0x1c038)	/* crb win lock */
+#define PCIE_SEM7_UNLOCK     (0x1c03c)	/* crbwin unlock*/
+
+/* Different drive state */
+#define QLA82XX_DRVST_NOT_RDY		0
+#define	QLA82XX_DRVST_RST_RDY		1
+#define QLA82XX_DRVST_QSNT_RDY		2
+
+/* Different drive active state */
+#define QLA82XX_DRV_NOT_ACTIVE		0
+#define QLA82XX_DRV_ACTIVE		1
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISP8021		0x8021
+#define PCI_DEVICE_ID_QLOGIC_ISP8044		0x8044
+
+#define QLA82XX_MSIX_TBL_SPACE			8192
+#define QLA82XX_PCI_REG_MSIX_TBL		0x44
+#define QLA82XX_PCI_MSIX_CONTROL		0x40
+
+struct crb_128M_2M_sub_block_map {
+	unsigned valid;
+	unsigned start_128M;
+	unsigned end_128M;
+	unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map {
+	struct crb_128M_2M_sub_block_map sub_block[16];
+};
+
+struct crb_addr_pair {
+	long addr;
+	long data;
+};
+
+#define ADDR_ERROR ((unsigned long) 0xffffffff)
+#define MAX_CTL_CHECK	1000
+
+/***************************************************************************
+ *		PCI related defines.
+ **************************************************************************/
+
+/*
+ * Interrupt related defines.
+ */
+#define PCIX_TARGET_STATUS	(0x10118)
+#define PCIX_TARGET_STATUS_F1	(0x10160)
+#define PCIX_TARGET_STATUS_F2	(0x10164)
+#define PCIX_TARGET_STATUS_F3	(0x10168)
+#define PCIX_TARGET_STATUS_F4	(0x10360)
+#define PCIX_TARGET_STATUS_F5	(0x10364)
+#define PCIX_TARGET_STATUS_F6	(0x10368)
+#define PCIX_TARGET_STATUS_F7	(0x1036c)
+
+#define PCIX_TARGET_MASK	(0x10128)
+#define PCIX_TARGET_MASK_F1	(0x10170)
+#define PCIX_TARGET_MASK_F2	(0x10174)
+#define PCIX_TARGET_MASK_F3	(0x10178)
+#define PCIX_TARGET_MASK_F4	(0x10370)
+#define PCIX_TARGET_MASK_F5	(0x10374)
+#define PCIX_TARGET_MASK_F6	(0x10378)
+#define PCIX_TARGET_MASK_F7	(0x1037c)
+
+/*
+ * Message Signaled Interrupts
+ */
+#define PCIX_MSI_F0		(0x13000)
+#define PCIX_MSI_F1		(0x13004)
+#define PCIX_MSI_F2		(0x13008)
+#define PCIX_MSI_F3		(0x1300c)
+#define PCIX_MSI_F4		(0x13010)
+#define PCIX_MSI_F5		(0x13014)
+#define PCIX_MSI_F6		(0x13018)
+#define PCIX_MSI_F7		(0x1301c)
+#define PCIX_MSI_F(FUNC)	(0x13000 + ((FUNC) * 4))
+#define PCIX_INT_VECTOR		(0x10100)
+#define PCIX_INT_MASK		(0x10104)
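+/*
+ * For illustration: PCIX_MSI_F(FUNC) generalizes the per-function defines
+ * above, e.g. PCIX_MSI_F(3) = 0x13000 + 3 * 4 = 0x1300c = PCIX_MSI_F3.
+ */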
+
+/*
+ * Interrupt state machine and other bits.
+ */
+#define PCIE_MISCCFG_RC		(0x1206c)
+
+#define ISR_INT_TARGET_STATUS \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_STATUS_F1 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_STATUS_F2 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_STATUS_F3 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_STATUS_F4 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_STATUS_F5 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_STATUS_F6 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_STATUS_F7 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+
+#define ISR_INT_TARGET_MASK \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_MASK_F1 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_MASK_F2 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_MASK_F3 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_MASK_F4 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_MASK_F5 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_MASK_F6 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_MASK_F7 \
+	(QLA82XX_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define ISR_INT_VECTOR \
+	(QLA82XX_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK \
+	(QLA82XX_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_STATE_REG \
+	(QLA82XX_PCIX_PS_REG(PCIE_MISCCFG_RC))
+
+#define	ISR_MSI_INT_TRIGGER(FUNC) \
+	(QLA82XX_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+
+#define	ISR_IS_LEGACY_INTR_IDLE(VAL)		(((VAL) & 0x300) == 0)
+#define	ISR_IS_LEGACY_INTR_TRIGGERED(VAL)	(((VAL) & 0x300) == 0x200)
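+/*
+ * For illustration: both macros above test bits 9:8 of the interrupt
+ * vector value; 00b in those bits means the legacy interrupt line is idle,
+ * 10b means it has been triggered.
+ */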
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define	PCIX_INT_VECTOR_BIT_F0	0x0080
+#define	PCIX_INT_VECTOR_BIT_F1	0x0100
+#define	PCIX_INT_VECTOR_BIT_F2	0x0200
+#define	PCIX_INT_VECTOR_BIT_F3	0x0400
+#define	PCIX_INT_VECTOR_BIT_F4	0x0800
+#define	PCIX_INT_VECTOR_BIT_F5	0x1000
+#define	PCIX_INT_VECTOR_BIT_F6	0x2000
+#define	PCIX_INT_VECTOR_BIT_F7	0x4000
+
+struct qla82xx_legacy_intr_set {
+	uint32_t	int_vec_bit;
+	uint32_t	tgt_status_reg;
+	uint32_t	tgt_mask_reg;
+	uint32_t	pci_int_reg;
+};
+
+#define QLA82XX_LEGACY_INTR_CONFIG					\
+{									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F0,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS,		\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(0) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F1,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F1,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F1,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(1) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F2,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F2,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F2,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(2) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F3,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F3,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F3,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(3) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F4,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F4,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F4,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(4) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F5,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F5,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F5,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(5) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F6,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F6,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F6,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(6) },	\
+									\
+	{								\
+		.int_vec_bit	=	PCIX_INT_VECTOR_BIT_F7,		\
+		.tgt_status_reg =	ISR_INT_TARGET_STATUS_F7,	\
+		.tgt_mask_reg	=	ISR_INT_TARGET_MASK_F7,		\
+		.pci_int_reg	=	ISR_MSI_INT_TRIGGER(7) },	\
+}
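+/*
+ * A minimal usage sketch (assumed consumer-side declaration; the table is
+ * intended to be instantiated once and indexed by PCI function number):
+ *
+ *	static struct qla82xx_legacy_intr_set legacy_intr[] =
+ *		QLA82XX_LEGACY_INTR_CONFIG;
+ */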
+
+#define BRDCFG_START		0x4000
+#define	BOOTLD_START		0x10000
+#define	IMAGE_START		0x100000
+#define FLASH_ADDR_START	0x43000
+
+/* Magic number to let user know flash is programmed */
+#define QLA82XX_BDINFO_MAGIC	0x12345678
+#define QLA82XX_FW_MAGIC_OFFSET	(BRDCFG_START + 0x128)
+#define FW_SIZE_OFFSET		(0x3e840c)
+#define QLA82XX_FW_MIN_SIZE	0x3fffff
+
+/* UNIFIED ROMIMAGE START */
+#define QLA82XX_URI_FW_MIN_SIZE			0xc8000
+#define QLA82XX_URI_DIR_SECT_PRODUCT_TBL	0x0
+#define QLA82XX_URI_DIR_SECT_BOOTLD		0x6
+#define QLA82XX_URI_DIR_SECT_FW			0x7
+
+/* Offsets */
+#define QLA82XX_URI_CHIP_REV_OFF	10
+#define QLA82XX_URI_FLAGS_OFF		11
+#define QLA82XX_URI_BIOS_VERSION_OFF	12
+#define QLA82XX_URI_BOOTLD_IDX_OFF	27
+#define QLA82XX_URI_FIRMWARE_IDX_OFF	29
+
+struct qla82xx_uri_table_desc {
+	uint32_t	findex;
+	uint32_t	num_entries;
+	uint32_t	entry_size;
+	uint32_t	reserved[5];
+};
+
+struct qla82xx_uri_data_desc {
+	uint32_t	findex;
+	uint32_t	size;
+	uint32_t	reserved[5];
+};
+
+/* UNIFIED ROMIMAGE END */
+
+#define QLA82XX_UNIFIED_ROMIMAGE	3
+#define QLA82XX_FLASH_ROMIMAGE		4
+#define QLA82XX_UNKNOWN_ROMIMAGE	0xff
+
+#define MIU_TEST_AGT_WRDATA_UPPER_LO		(0x0b0)
+#define	MIU_TEST_AGT_WRDATA_UPPER_HI		(0x0b4)
+
+/* Request and response queue size */
+#define REQUEST_ENTRY_CNT_82XX		128	/* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_82XX		128	/* Number of response entries.*/
+
+/*
+ * ISP 8021 I/O Register Set structure definitions.
+ */
+struct device_reg_82xx {
+	uint32_t req_q_out[64];		/* Request Queue out-Pointer (64 * 4) */
+	uint32_t rsp_q_in[64];		/* Response Queue In-Pointer. */
+	uint32_t rsp_q_out[64];		/* Response Queue Out-Pointer. */
+
+	uint16_t mailbox_in[32];	/* Mail box In registers */
+	uint16_t unused_1[32];
+	uint32_t hint;			/* Host interrupt register */
+#define	HINT_MBX_INT_PENDING	BIT_0
+	uint16_t unused_2[62];
+	uint16_t mailbox_out[32];	/* Mail box Out registers */
+	uint32_t unused_3[48];
+
+	uint32_t host_status;		/* host status */
+#define HSRX_RISC_INT		BIT_15	/* RISC to Host interrupt. */
+#define HSRX_RISC_PAUSED	BIT_8	/* RISC Paused. */
+	uint32_t host_int;		/* Interrupt status. */
+#define ISRX_NX_RISC_INT	BIT_0	/* RISC interrupt. */
+};
+
+struct fcp_cmnd {
+	struct scsi_lun lun;
+	uint8_t crn;
+	uint8_t task_attribute;
+	uint8_t task_management;
+	uint8_t additional_cdb_len;
+	uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+};
+
+struct dsd_dma {
+	struct list_head list;
+	dma_addr_t dsd_list_dma;
+	void *dsd_addr;
+};
+
+#define QLA_DSDS_PER_IOCB	37
+#define QLA_DSD_SIZE		12
+struct ct6_dsd {
+	uint16_t fcp_cmnd_len;
+	dma_addr_t fcp_cmnd_dma;
+	struct fcp_cmnd *fcp_cmnd;
+	int dsd_use_cnt;
+	struct list_head dsd_list;
+};
+
+#define MBC_TOGGLE_INTERRUPT	0x10
+#define MBC_SET_LED_CONFIG	0x125	/* FCoE specific LED control */
+#define MBC_GET_LED_CONFIG	0x126	/* FCoE specific LED control */
+
+/* Flash  offset */
+#define FLT_REG_BOOTLOAD_82XX	0x72
+#define FLT_REG_BOOT_CODE_82XX	0x78
+#define FLT_REG_FW_82XX		0x74
+#define FLT_REG_GOLD_FW_82XX	0x75
+#define FLT_REG_VPD_8XXX	0x81
+
+#define	FA_VPD_SIZE_82XX	0x400
+
+#define FA_FLASH_LAYOUT_ADDR_82	0xFC400
+
+/******************************************************************************
+*
+*    Definitions specific to M25P flash
+*
+*******************************************************************************
+*   Instructions
+*/
+#define M25P_INSTR_WREN		0x06
+#define M25P_INSTR_WRDI		0x04
+#define M25P_INSTR_RDID		0x9f
+#define M25P_INSTR_RDSR		0x05
+#define M25P_INSTR_WRSR		0x01
+#define M25P_INSTR_READ		0x03
+#define M25P_INSTR_FAST_READ	0x0b
+#define M25P_INSTR_PP		0x02
+#define M25P_INSTR_SE		0xd8
+#define M25P_INSTR_BE		0xc7
+#define M25P_INSTR_DP		0xb9
+#define M25P_INSTR_RES		0xab
+
+/* Minidump related */
+
+/*
+ * Version of the template
+ * 4 Bytes
+ * X.Major.Minor.RELEASE
+ */
+#define QLA82XX_MINIDUMP_VERSION         0x10101
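+/*
+ * For illustration, reading the four bytes as X.Major.Minor.RELEASE:
+ * 0x00010101 decodes to major 1, minor 1, release 1.
+ */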
+
+/*
+ * Entry Type Defines
+ */
+#define QLA82XX_RDNOP                   0
+#define QLA82XX_RDCRB                   1
+#define QLA82XX_RDMUX                   2
+#define QLA82XX_QUEUE                   3
+#define QLA82XX_BOARD                   4
+#define QLA82XX_RDSRE                   5
+#define QLA82XX_RDOCM                   6
+#define QLA82XX_CACHE                  10
+#define QLA82XX_L1DAT                  11
+#define QLA82XX_L1INS                  12
+#define QLA82XX_L2DTG                  21
+#define QLA82XX_L2ITG                  22
+#define QLA82XX_L2DAT                  23
+#define QLA82XX_L2INS                  24
+#define QLA82XX_RDROM                  71
+#define QLA82XX_RDMEM                  72
+#define QLA82XX_CNTRL                  98
+#define QLA82XX_TLHDR                  99
+#define QLA82XX_RDEND                  255
+#define QLA8044_POLLRD			35
+#define QLA8044_RDMUX2			36
+#define QLA8044_L1DTG			8
+#define QLA8044_L1ITG			9
+#define QLA8044_POLLRDMWR		37
+
+/*
+ * Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR        0x01
+#define QLA82XX_DBG_OPCODE_RW        0x02
+#define QLA82XX_DBG_OPCODE_AND       0x04
+#define QLA82XX_DBG_OPCODE_OR        0x08
+#define QLA82XX_DBG_OPCODE_POLL      0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE   0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE   0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE   0x80
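+/*
+ * For illustration: since the opcodes are bit fields they combine, e.g. a
+ * control entry with opcode 0x05 (QLA82XX_DBG_OPCODE_WR |
+ * QLA82XX_DBG_OPCODE_AND) requests both a write and an AND operation.
+ */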
+
+/*
+ * Template Header and Entry Header definitions start here.
+ */
+
+/*
+ * Template Header
+ * Parts of the template header can be modified by the driver.
+ * These include the saved_state_array, capture_debug_level, driver_timestamp
+ */
+
+#define QLA82XX_DBG_STATE_ARRAY_LEN        16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN     8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN         8
+
+/*
+ * Driver Flags
+ */
+#define QLA82XX_DBG_SKIPPED_FLAG	0x80	/* driver skipped this entry */
+#define	QLA82XX_DEFAULT_CAP_MASK	0xFF	/* default capture mask */
+
+struct qla82xx_md_template_hdr {
+	uint32_t entry_type;
+	uint32_t first_entry_offset;
+	uint32_t size_of_template;
+	uint32_t capture_debug_level;
+
+	uint32_t num_of_entries;
+	uint32_t version;
+	uint32_t driver_timestamp;
+	uint32_t template_checksum;
+
+	uint32_t driver_capture_mask;
+	uint32_t driver_info[3];
+
+	uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+	uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+
+	/*  markers_array used to capture some special locations on board */
+	uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN];
+	uint32_t num_of_free_entries;	/* For internal use */
+	uint32_t free_entry_offset;	/* For internal use */
+	uint32_t total_table_size;	/*  For internal use */
+	uint32_t bkup_table_offset;	/*  For internal use */
+} __packed;
+
+/*
+ * Entry Header:  Common to All Entry Types
+ */
+
+/*
+ * Driver Code is for driver to write some info about the entry.
+ * Currently not used.
+ */
+typedef struct qla82xx_md_entry_hdr {
+	uint32_t entry_type;
+	uint32_t entry_size;
+	uint32_t entry_capture_size;
+	struct {
+		uint8_t entry_capture_mask;
+		uint8_t entry_code;
+		uint8_t driver_code;
+		uint8_t driver_flags;
+	} d_ctrl;
+} __packed qla82xx_md_entry_hdr_t;
+
+/*
+ *  Read CRB entry header
+ */
+struct qla82xx_md_entry_crb {
+	qla82xx_md_entry_hdr_t h;
+	uint32_t addr;
+	struct {
+		uint8_t addr_stride;
+		uint8_t state_index_a;
+		uint16_t poll_timeout;
+	} crb_strd;
+
+	uint32_t data_size;
+	uint32_t op_count;
+
+	struct {
+		uint8_t opcode;
+		uint8_t state_index_v;
+		uint8_t shl;
+		uint8_t shr;
+	} crb_ctrl;
+
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t value_3;
+} __packed;
+
+/*
+ * Cache entry header
+ */
+struct qla82xx_md_entry_cache {
+	qla82xx_md_entry_hdr_t h;
+
+	uint32_t tag_reg_addr;
+	struct {
+		uint16_t tag_value_stride;
+		uint16_t init_tag_value;
+	} addr_ctrl;
+
+	uint32_t data_size;
+	uint32_t op_count;
+
+	uint32_t control_addr;
+	struct {
+		uint16_t write_value;
+		uint8_t poll_mask;
+		uint8_t poll_wait;
+	} cache_ctrl;
+
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_1;
+	} read_ctrl;
+} __packed;
+
+/*
+ * Read OCM
+ */
+struct qla82xx_md_entry_rdocm {
+	qla82xx_md_entry_hdr_t h;
+
+	uint32_t rsvd_0;
+	uint32_t rsvd_1;
+	uint32_t data_size;
+	uint32_t op_count;
+
+	uint32_t rsvd_2;
+	uint32_t rsvd_3;
+	uint32_t read_addr;
+	uint32_t read_addr_stride;
+	uint32_t read_addr_cntrl;
+} __packed;
+
+/*
+ * Read Memory
+ */
+struct qla82xx_md_entry_rdmem {
+	qla82xx_md_entry_hdr_t h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+} __packed;
+
+/*
+ * Read ROM
+ */
+struct qla82xx_md_entry_rdrom {
+	qla82xx_md_entry_hdr_t h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+} __packed;
+
+struct qla82xx_md_entry_mux {
+	qla82xx_md_entry_hdr_t h;
+
+	uint32_t select_addr;
+	uint32_t rsvd_0;
+	uint32_t data_size;
+	uint32_t op_count;
+
+	uint32_t select_value;
+	uint32_t select_value_stride;
+	uint32_t read_addr;
+	uint32_t rsvd_1;
+} __packed;
+
+struct qla82xx_md_entry_queue {
+	qla82xx_md_entry_hdr_t h;
+
+	uint32_t select_addr;
+	struct {
+		uint16_t queue_id_stride;
+		uint16_t rsvd_0;
+	} q_strd;
+
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_1;
+	uint32_t rsvd_2;
+
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_3;
+	} rd_strd;
+} __packed;
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE	0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW	0x42110030
+#define MD_DIRECT_ROM_READ_BASE	0x42150000
+#define MD_MIU_TEST_AGT_CTRL		0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO		0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI		0x41000098
+
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
+
+#define CRB_NIU_XG_PAUSE_CTL_P0        0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1        0x8
+
+#define qla82xx_get_temp_val(x)          ((x) >> 16)
+#define qla82xx_get_temp_state(x)        ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)  (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+	QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+	QLA82XX_TEMP_WARN,	   /* Sound alert, temperature getting high */
+	QLA82XX_TEMP_PANIC	   /* Fatal error, hardware has shut down. */
+};
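+/*
+ * For illustration: a temperature register value of 0x004b0001 decodes via
+ * the macros above to a reading of 75 (0x4b) in state QLA82XX_TEMP_NORMAL.
+ */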
+
+#define LEG_INTR_PTR_OFFSET	0x38C0
+#define LEG_INTR_TRIG_OFFSET	0x38C4
+#define LEG_INTR_MASK_OFFSET	0x38C8
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx2.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx2.c
new file mode 100644
index 0000000..0aa9c38
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx2.c
@@ -0,0 +1,4091 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#define TIMEOUT_100_MS 100
+
+static const uint32_t qla8044_reg_tbl[] = {
+	QLA8044_PEG_HALT_STATUS1,
+	QLA8044_PEG_HALT_STATUS2,
+	QLA8044_PEG_ALIVE_COUNTER,
+	QLA8044_CRB_DRV_ACTIVE,
+	QLA8044_CRB_DEV_STATE,
+	QLA8044_CRB_DRV_STATE,
+	QLA8044_CRB_DRV_SCRATCH,
+	QLA8044_CRB_DEV_PART_INFO1,
+	QLA8044_CRB_IDC_VER_MAJOR,
+	QLA8044_FW_VER_MAJOR,
+	QLA8044_FW_VER_MINOR,
+	QLA8044_FW_VER_SUB,
+	QLA8044_CMDPEG_STATE,
+	QLA8044_ASIC_TEMP,
+};
+
+/* 8044 Flash Read/Write functions */
+uint32_t
+qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
+{
+	return readl((void __iomem *)(ha->nx_pcibase + addr));
+}
+
+void
+qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
+{
+	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
+}
+
+int
+qla8044_rd_direct(struct scsi_qla_host *vha,
+	const uint32_t crb_reg)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (crb_reg < CRB_REG_INDEX_MAX)
+		return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
+	else
+		return QLA_FUNCTION_FAILED;
+}
+
+void
+qla8044_wr_direct(struct scsi_qla_host *vha,
+	const uint32_t crb_reg,
+	const uint32_t value)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (crb_reg < CRB_REG_INDEX_MAX)
+		qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
+}
+
+static int
+qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
+{
+	uint32_t val;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
+	val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
+
+	if (val != addr) {
+		ql_log(ql_log_warn, vha, 0xb087,
+		    "%s: Failed to set register window : "
+		    "addr written 0x%x, read 0x%x!\n",
+		    __func__, addr, val);
+		ret_val = QLA_FUNCTION_FAILED;
+	}
+	return ret_val;
+}
+
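+/*
+ * Indirect CRB access pattern: qla8044_set_win_base() programs this
+ * function's CRB window register with the target address (verifying by
+ * read-back), after which a read/write of the QLA8044_WILDCARD offset
+ * accesses the windowed address. The two helpers below wrap that sequence.
+ */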
+static int
+qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_set_win_base(vha, addr);
+	if (!ret_val)
+		*data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
+	else
+		ql_log(ql_log_warn, vha, 0xb088,
+		    "%s: failed read of addr 0x%x!\n", __func__, addr);
+	return ret_val;
+}
+
+static int
+qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_set_win_base(vha, addr);
+	if (!ret_val)
+		qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
+	else
+		ql_log(ql_log_warn, vha, 0xb089,
+		    "%s: failed wrt to addr 0x%x, data 0x%x\n",
+		    __func__, addr, data);
+	return ret_val;
+}
+
+/*
+ * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @vha: Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ *
+ */
+static void
+qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
+	uint32_t raddr, uint32_t waddr)
+{
+	uint32_t value;
+
+	qla8044_rd_reg_indirect(vha, raddr, &value);
+	qla8044_wr_reg_indirect(vha, waddr, value);
+}
+
+static int
+qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
+	uint32_t mask)
+{
+	unsigned long timeout;
+	uint32_t temp;
+
+	/* jiffies after 100ms */
+	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+	do {
+		qla8044_rd_reg_indirect(vha, addr1, &temp);
+		if ((temp & mask) != 0)
+			break;
+		if (time_after_eq(jiffies, timeout)) {
+			ql_log(ql_log_warn, vha, 0xb151,
+				"Error in processing rdmdio entry\n");
+			return -1;
+		}
+	} while (1);
+
+	return 0;
+}
+
+static uint32_t
+qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
+	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
+{
+	uint32_t temp, data = 0;
+	int ret = 0;
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return -1;
+
+	temp = (0x40000000 | addr);
+	qla8044_wr_reg_indirect(vha, addr1, temp);
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return 0;
+
+	qla8044_rd_reg_indirect(vha, addr3, &data);
+
+	return data;
+}
+
+
+static int
+qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
+	uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
+{
+	unsigned long timeout;
+	uint32_t temp;
+
+	/* jiffies after 100 msecs */
+	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+	do {
+		temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
+		if ((temp & 0x1) != 1)
+			break;
+		if (time_after_eq(jiffies, timeout)) {
+			ql_log(ql_log_warn, vha, 0xb152,
+			    "Error in processing mdiobus idle\n");
+			return -1;
+		}
+	} while (1);
+
+	return 0;
+}
+
+static int
+qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
+	uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
+{
+	int ret = 0;
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return -1;
+
+	qla8044_wr_reg_indirect(vha, addr3, value);
+	qla8044_wr_reg_indirect(vha, addr1, addr);
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return -1;
+
+	return 0;
+}
+/*
+ * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
+ * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
+	uint32_t raddr, uint32_t waddr,	struct qla8044_rmw *p_rmw_hdr)
+{
+	uint32_t value;
+
+	if (p_rmw_hdr->index_a)
+		value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
+	else
+		qla8044_rd_reg_indirect(vha, raddr, &value);
+	value &= p_rmw_hdr->test_mask;
+	value <<= p_rmw_hdr->shl;
+	value >>= p_rmw_hdr->shr;
+	value |= p_rmw_hdr->or_value;
+	value ^= p_rmw_hdr->xor_value;
+	qla8044_wr_reg_indirect(vha, waddr, value);
+	return;
+}
+
+static inline void
+qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
+{
+	uint32_t qsnt_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+	qsnt_state |= (1 << ha->portnum);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+	ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
+	     __func__, vha->host_no, qsnt_state);
+}
+
+void
+qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
+{
+	uint32_t qsnt_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+	qsnt_state &= ~(1 << ha->portnum);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+	ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
+	    __func__, vha->host_no, qsnt_state);
+}
+
+/**
+ * qla8044_lock_recovery - Recover the IDC lock.
+ * @vha: Pointer to adapter structure
+ *
+ * Lock Recovery Register
+ * 5-2	Lock recovery owner: Function ID of driver doing lock recovery,
+ *	valid if bits 1..0 are set by driver doing lock recovery.
+ * 1-0  1 - Driver intends to force unlock the IDC lock.
+ *	2 - Driver is moving forward to unlock the IDC lock. Driver clears
+ *	    this field after force unlocking the IDC lock.
+ *
+ * Lock Recovery process
+ * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
+ *    greater than 0, then wait for the other driver to unlock otherwise
+ *    move to the next step.
+ * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
+ *    register bits 1..0 and also set the function# in bits 5..2.
+ * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
+ *    Wait for the other driver to perform lock recovery if the function
+ *    number in bits 5..2 has changed, otherwise move to the next step.
+ * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
+ *    leaving your function# in bits 5..2.
+ * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
+ *    the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
+ **/
+static int
+qla8044_lock_recovery(struct scsi_qla_host *vha)
+{
+	uint32_t lock = 0, lockid;
+	struct qla_hw_data *ha = vha->hw;
+
+	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+
+	/* Check for other Recovery in progress, go wait */
+	if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
+		return QLA_FUNCTION_FAILED;
+
+	/* Intent to Recover */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+	    (ha->portnum <<
+	     IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
+	msleep(200);
+
+	/* Check Intent to Recover is advertised */
+	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+	if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
+	    IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
+		return QLA_FUNCTION_FAILED;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb08b,
+	    "%s:%d: IDC lock recovery initiated\n", __func__, ha->portnum);
+
+	/* Proceed to Recover */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
+	    PROCEED_TO_RECOVER);
+
+	/* Force Unlock() */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
+	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+
+	/* Clear bits 0-5 in IDC_RECOVERY register*/
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
+
+	/* Get lock() */
+	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+	if (lock) {
+		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
+		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
+		return QLA_SUCCESS;
+	}
+
+	return QLA_FUNCTION_FAILED;
+}
+
+int
+qla8044_idc_lock(struct qla_hw_data *ha)
+{
+	uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
+	uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	while (status == 0) {
+		/* acquire semaphore5 from PCI HW block */
+		status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+
+		if (status) {
+			/* Increment Counter (8-31) and update func_num (0-7) on
+			 * getting a successful lock  */
+			lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
+			qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
+			break;
+		}
+
+		if (timeout == 0)
+			first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+		if (++timeout >=
+		    (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
+			tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+			func_num = tmo_owner & 0xFF;
+			lock_cnt = tmo_owner >> 8;
+			ql_log(ql_log_warn, vha, 0xb114,
+			    "%s: Lock by func %d failed after 2s, lock held "
+			    "by func %d, lock count %d, first_owner %d\n",
+			    __func__, ha->portnum, func_num, lock_cnt,
+			    (first_owner & 0xFF));
+			if (first_owner != tmo_owner) {
+				/* Some other driver got lock,
+				 * OR same driver got lock again (counter
+				 * value changed), when we were waiting for
+				 * lock. Retry for another 2 sec */
+				ql_dbg(ql_dbg_p3p, vha, 0xb115,
+				    "%s: %d: IDC lock failed\n",
+				    __func__, ha->portnum);
+				timeout = 0;
+			} else {
+				/* Same driver holding lock > 2sec.
+				 * Force Recovery */
+				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+					/* Recovered and got lock */
+					ret_val = QLA_SUCCESS;
+					ql_dbg(ql_dbg_p3p, vha, 0xb116,
+					    "%s: IDC lock recovery by %d "
+					    "successful.\n", __func__,
+					    ha->portnum);
+					break;
+				}
+				/* Recovery failed: some other function
+				 * holds the lock, wait for 2 secs
+				 * and retry.
+				 */
+				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+				       "%s: IDC lock recovery by %d "
+				       "failed, retrying timeout\n", __func__,
+				       ha->portnum);
+				timeout = 0;
+			}
+		}
+		msleep(QLA8044_DRV_LOCK_MSLEEP);
+	}
+	return ret_val;
+}
+
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+	int id;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+	if ((id & 0xFF) != ha->portnum) {
+		ql_log(ql_log_warn, vha, 0xb118,
+		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+		    __func__, ha->portnum, (id & 0xFF));
+		return;
+	}
+
+	/* Keep the lock counter value, set the func_num field to 0xFF */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
+
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+	int lock_owner;
+	int timeout = 0;
+	uint32_t lock_status = 0;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	while (lock_status == 0) {
+		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+		if (lock_status)
+			break;
+
+		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+			lock_owner = qla8044_rd_reg(ha,
+			    QLA8044_FLASH_LOCK_ID);
+			ql_log(ql_log_warn, vha, 0xb113,
+			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+			    __func__, ha->portnum, lock_owner);
+			ret_val = QLA_FUNCTION_FAILED;
+			break;
+		}
+		msleep(20);
+	}
+	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+	return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Reading FLASH_UNLOCK register unlocks the Flash */
+	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+
+static void
+qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+	if (qla8044_flash_lock(vha)) {
+		/* Someone else is holding the lock. */
+		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+	}
+
+	/*
+	 * Either we got the lock, or someone
+	 * else died while holding it.
+	 * In either case, unlock.
+	 */
+	qla8044_flash_unlock(vha);
+}
+
+/*
+ * flash_addr is a byte address; u32_word_count is the number of 32-bit
+ * words to read.
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha,  uint8_t *p_data,
+	uint32_t flash_addr, int u32_word_count)
+{
+	int i, ret_val = QLA_SUCCESS;
+	uint32_t u32_word;
+
+	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_lock_error;
+	}
+
+	if (flash_addr & 0x03) {
+		ql_log(ql_log_warn, vha, 0xb117,
+		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_flash_read;
+	}
+
+	for (i = 0; i < u32_word_count; i++) {
+		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+		    (flash_addr & 0xFFFF0000))) {
+			ql_log(ql_log_warn, vha, 0xb119,
+			    "%s: failed to write addr 0x%x to "
+			    "FLASH_DIRECT_WINDOW\n! ",
+			    __func__, flash_addr);
+			ret_val = QLA_FUNCTION_FAILED;
+			goto exit_flash_read;
+		}
+
+		ret_val = qla8044_rd_reg_indirect(vha,
+		    QLA8044_FLASH_DIRECT_DATA(flash_addr),
+		    &u32_word);
+		if (ret_val != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0xb08c,
+			    "%s: failed to read addr 0x%x!\n",
+			    __func__, flash_addr);
+			goto exit_flash_read;
+		}
+
+		*(uint32_t *)p_data = u32_word;
+		p_data = p_data + 4;
+		flash_addr = flash_addr + 4;
+	}
+
+exit_flash_read:
+	qla8044_flash_unlock(vha);
+
+exit_lock_error:
+	return ret_val;
+}
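+
+/* Usage sketch (hypothetical values): to read 64 bytes starting at byte
+ * offset 0x1000, a caller passes u32_word_count = 64 / 4 = 16; the loop
+ * then pulls sixteen 32-bit words through the direct-data window,
+ * advancing flash_addr and p_data by 4 bytes per word.
+ */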
+
+/*
+ * Offset and length are byte-based; length is consumed as length / 4
+ * 32-bit words.
+ */
+uint8_t *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+	uint32_t offset, uint32_t length)
+{
+	scsi_block_requests(vha->host);
+	if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+	    != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0xb08d,
+		    "%s: Failed to read from flash\n",
+		    __func__);
+	}
+	scsi_unblock_requests(vha->host);
+	return buf;
+}
+
+static inline int
+qla8044_need_reset(struct scsi_qla_host *vha)
+{
+	uint32_t drv_state, drv_active;
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	rval = drv_state & (1 << ha->portnum);
+
+	if (ha->flags.eeh_busy && drv_active)
+		rval = 1;
+	return rval;
+}
+
+/*
+ * qla8044_write_list - Write the value (p_entry->arg2) to the address
+ * specified by p_entry->arg1 for all entries in the list, with a delay of
+ * p_hdr->delay microsecs between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_write_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	struct qla8044_entry *p_entry;
+	uint32_t i;
+
+	p_entry = (struct qla8044_entry *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
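+
+/* Layout sketch, as implied by the pointer arithmetic above (see the
+ * qla8044_reset_entry_hdr and qla8044_entry definitions in the driver
+ * headers): each sub-sequence is a header immediately followed by a packed
+ * array of entries,
+ *
+ *   [cmd | size | count | delay][arg1, arg2][arg1, arg2]...
+ *
+ * so p_entry starts at (char *)p_hdr + sizeof(header) and advances one
+ * struct qla8044_entry per iteration.
+ */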
+
+/*
+ * qla8044_read_write_list - Read from address specified by p_entry->arg1,
+ * write value read to address specified by p_entry->arg2, for all entries in
+ * header with delay of p_hdr->delay between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_read_write_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	struct qla8044_entry *p_entry;
+	uint32_t i;
+
+	p_entry = (struct qla8044_entry *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_read_write_crb_reg(vha, p_entry->arg1,
+		    p_entry->arg2);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+/*
+ * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
+ * value read ANDed with test_mask is equal to test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for total of "duration" msecs
+ * @test_mask : Mask to AND with the value read
+ * @test_result : Compare (value & test_mask) with test_result.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
+	int duration, uint32_t test_mask, uint32_t test_result)
+{
+	uint32_t value;
+	int timeout_error;
+	uint8_t retries;
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		timeout_error = 1;
+		goto exit_poll_reg;
+	}
+
+	/* poll every 1/10 of the total duration */
+	retries = duration/10;
+
+	do {
+		if ((value & test_mask) != test_result) {
+			timeout_error = 1;
+			msleep(duration/10);
+			ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+			if (ret_val == QLA_FUNCTION_FAILED) {
+				timeout_error = 1;
+				goto exit_poll_reg;
+			}
+		} else {
+			timeout_error = 0;
+			break;
+		}
+	} while (retries--);
+
+exit_poll_reg:
+	if (timeout_error) {
+		vha->reset_tmplt.seq_error++;
+		ql_log(ql_log_fatal, vha, 0xb090,
+		    "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+		    __func__, value, test_mask, test_result);
+	}
+
+	return timeout_error;
+}
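+
+/* Worked example (illustrative): with duration = 100 ms the loop gets
+ * retries = 100 / 10 = 10 passes with an msleep(10) between reads, so a
+ * register that never matches is sampled roughly a dozen times (one
+ * initial read plus the retry loop) before timeout_error is reported.
+ */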
+
+/*
+ * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
+ * register specified by p_entry->arg1 and compare (value AND test_mask) with
+ * test_result to validate it. Wait for p_hdr->delay between processing entries.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for POLL_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	struct qla8044_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla8044_poll *)
+		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+	/* Entries start after the 8-byte qla8044_poll header, which
+	 * contains the test_mask and test_value.
+	 */
+	p_entry = (struct qla8044_entry *)((char *)p_poll +
+	    sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	if (!delay) {
+		for (i = 0; i < p_hdr->count; i++, p_entry++)
+			qla8044_poll_reg(vha, p_entry->arg1,
+			    delay, p_poll->test_mask, p_poll->test_value);
+	} else {
+		for (i = 0; i < p_hdr->count; i++, p_entry++) {
+			if (qla8044_poll_reg(vha,
+			    p_entry->arg1, delay,
+			    p_poll->test_mask,
+			    p_poll->test_value)) {
+				/* If (value & test_mask) != test_value,
+				 * read the TIMEOUT_ADDR (arg1) and
+				 * ADDR (arg2) registers.
+				 */
+				qla8044_rd_reg_indirect(vha,
+				    p_entry->arg1, &value);
+				qla8044_rd_reg_indirect(vha,
+				    p_entry->arg2, &value);
+			}
+		}
+	}
+}
+
+/*
+ * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
+ * read ar_addr; if (value & test_mask) != test_value, re-read till the
+ * timeout expires.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_write_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	struct qla8044_quad_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+
+	p_poll = (struct qla8044_poll *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
+	    sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha,
+		    p_entry->dr_addr, p_entry->dr_value);
+		qla8044_wr_reg_indirect(vha,
+		    p_entry->ar_addr, p_entry->ar_value);
+		if (delay) {
+			if (qla8044_poll_reg(vha,
+			    p_entry->ar_addr, delay,
+			    p_poll->test_mask,
+			    p_poll->test_value)) {
+				ql_dbg(ql_dbg_p3p, vha, 0xb091,
+				    "%s: Timeout Error: poll list, ",
+				    __func__);
+				ql_dbg(ql_dbg_p3p, vha, 0xb092,
+				    "item_num %d, entry_num %d\n", i,
+				    vha->reset_tmplt.seq_index);
+			}
+		}
+	}
+}
+
+/*
+ * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
+ * value, write value to p_entry->arg2. Process entries with p_hdr->delay
+ * between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_read_modify_write(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	struct qla8044_entry *p_entry;
+	struct qla8044_rmw *p_rmw_hdr;
+	uint32_t i;
+
+	p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
+	    sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
+	    sizeof(struct qla8044_rmw));
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_rmw_crb_reg(vha, p_entry->arg1,
+		    p_entry->arg2, p_rmw_hdr);
+		if (p_hdr->delay)
+			udelay((uint32_t)(p_hdr->delay));
+	}
+}
+
+/*
+ * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
+ * two entries of a sequence.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static
+void qla8044_pause(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	if (p_hdr->delay)
+		mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+/*
+ * qla8044_template_end - Indicates end of reset sequence processing.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_template_end(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	vha->reset_tmplt.template_end = 1;
+
+	if (vha->reset_tmplt.seq_error == 0) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb093,
+		    "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb094,
+		    "%s: Reset sequence completed with some timeout "
+		    "errors.\n", __func__);
+	}
+}
+
+/*
+ * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
+ * if (value & test_mask) != test_value, re-read till the timeout expires,
+ * read dr_addr register and assign to reset_tmplt.array.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	int index;
+	struct qla8044_quad_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla8044_poll *)
+		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_quad_entry *)
+		((char *)p_poll + sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+		    p_entry->ar_value);
+		if (delay) {
+			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+			    p_poll->test_mask, p_poll->test_value)) {
+				ql_dbg(ql_dbg_p3p, vha, 0xb095,
+				    "%s: Timeout Error: poll "
+				    "list, ", __func__);
+				ql_dbg(ql_dbg_p3p, vha, 0xb096,
+				    "Item_num %d, "
+				    "entry_num %d\n", i,
+				    vha->reset_tmplt.seq_index);
+			} else {
+				index = vha->reset_tmplt.array_index;
+				qla8044_rd_reg_indirect(vha,
+				    p_entry->dr_addr, &value);
+				vha->reset_tmplt.array[index++] = value;
+				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+					vha->reset_tmplt.array_index = 1;
+				else
+					vha->reset_tmplt.array_index = index;
+			}
+		}
+	}
+}
+
+/*
+ * qla8044_process_reset_template - Process all entries in the reset
+ * template till the entry with SEQ_END opcode, which indicates the end of
+ * reset template processing. Each entry has a reset entry header with the
+ * entry opcode/command, the size of the entry, the number of entries in
+ * the sub-sequence and a delay in microsecs or a timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Pointer to the first entry of the sequence to be processed.
+ *
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+	char *p_buff)
+{
+	int index, entries;
+	struct qla8044_reset_entry_hdr *p_hdr;
+	char *p_entry = p_buff;
+
+	vha->reset_tmplt.seq_end = 0;
+	vha->reset_tmplt.template_end = 0;
+	entries = vha->reset_tmplt.hdr->entries;
+	index = vha->reset_tmplt.seq_index;
+
+	for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
+		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+		switch (p_hdr->cmd) {
+		case OPCODE_NOP:
+			break;
+		case OPCODE_WRITE_LIST:
+			qla8044_write_list(vha, p_hdr);
+			break;
+		case OPCODE_READ_WRITE_LIST:
+			qla8044_read_write_list(vha, p_hdr);
+			break;
+		case OPCODE_POLL_LIST:
+			qla8044_poll_list(vha, p_hdr);
+			break;
+		case OPCODE_POLL_WRITE_LIST:
+			qla8044_poll_write_list(vha, p_hdr);
+			break;
+		case OPCODE_READ_MODIFY_WRITE:
+			qla8044_read_modify_write(vha, p_hdr);
+			break;
+		case OPCODE_SEQ_PAUSE:
+			qla8044_pause(vha, p_hdr);
+			break;
+		case OPCODE_SEQ_END:
+			vha->reset_tmplt.seq_end = 1;
+			break;
+		case OPCODE_TMPL_END:
+			qla8044_template_end(vha, p_hdr);
+			break;
+		case OPCODE_POLL_READ_LIST:
+			qla8044_poll_read_list(vha, p_hdr);
+			break;
+		default:
+			ql_log(ql_log_fatal, vha, 0xb097,
+			    "%s: Unknown command ==> 0x%04x on "
+			    "entry = %d\n", __func__, p_hdr->cmd, index);
+			break;
+		}
+		/* Set pointer to next entry in the sequence. */
+		p_entry += p_hdr->size;
+	}
+	vha->reset_tmplt.seq_index = index;
+}
+
+static void
+qla8044_process_init_seq(struct scsi_qla_host *vha)
+{
+	qla8044_process_reset_template(vha,
+	    vha->reset_tmplt.init_offset);
+	if (vha->reset_tmplt.seq_end != 1)
+		ql_log(ql_log_fatal, vha, 0xb098,
+		    "%s: Abrupt INIT Sub-Sequence end.\n",
+		    __func__);
+}
+
+static void
+qla8044_process_stop_seq(struct scsi_qla_host *vha)
+{
+	vha->reset_tmplt.seq_index = 0;
+	qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
+	if (vha->reset_tmplt.seq_end != 1)
+		ql_log(ql_log_fatal, vha, 0xb099,
+		    "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
+}
+
+static void
+qla8044_process_start_seq(struct scsi_qla_host *vha)
+{
+	qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
+	if (vha->reset_tmplt.template_end != 1)
+		ql_log(ql_log_fatal, vha, 0xb09a,
+		    "%s: Abrupt START Sub-Sequence end.\n",
+		    __func__);
+}
+
+static int
+qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
+	uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
+{
+	uint32_t i;
+	uint32_t u32_word;
+	uint32_t flash_offset;
+	uint32_t addr = flash_addr;
+	int ret_val = QLA_SUCCESS;
+
+	flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
+
+	if (addr & 0x3) {
+		ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
+		    __func__, addr);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_lockless_read;
+	}
+
+	ret_val = qla8044_wr_reg_indirect(vha,
+	    QLA8044_FLASH_DIRECT_WINDOW, (addr));
+
+	if (ret_val != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0xb09c,
+		    "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+		    __func__, addr);
+		goto exit_lockless_read;
+	}
+
+	/* Check if data is spread across multiple sectors  */
+	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+	    (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+		/* Multi sector read */
+		for (i = 0; i < u32_word_count; i++) {
+			ret_val = qla8044_rd_reg_indirect(vha,
+			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+			if (ret_val != QLA_SUCCESS) {
+				ql_log(ql_log_fatal, vha, 0xb09d,
+				    "%s: failed to read addr 0x%x!\n",
+				    __func__, addr);
+				goto exit_lockless_read;
+			}
+			*(uint32_t *)p_data  = u32_word;
+			p_data = p_data + 4;
+			addr = addr + 4;
+			flash_offset = flash_offset + 4;
+			if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+				/* This write is needed once for each sector */
+				ret_val = qla8044_wr_reg_indirect(vha,
+				    QLA8044_FLASH_DIRECT_WINDOW, (addr));
+				if (ret_val != QLA_SUCCESS) {
+					ql_log(ql_log_fatal, vha, 0xb09f,
+					    "%s: failed to write addr "
+					    "0x%x to FLASH_DIRECT_WINDOW!\n",
+					    __func__, addr);
+					goto exit_lockless_read;
+				}
+				flash_offset = 0;
+			}
+		}
+	} else {
+		/* Single sector read */
+		for (i = 0; i < u32_word_count; i++) {
+			ret_val = qla8044_rd_reg_indirect(vha,
+			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+			if (ret_val != QLA_SUCCESS) {
+				ql_log(ql_log_fatal, vha, 0xb0a0,
+				    "%s: failed to read addr 0x%x!\n",
+				    __func__, addr);
+				goto exit_lockless_read;
+			}
+			*(uint32_t *)p_data = u32_word;
+			p_data = p_data + 4;
+			addr = addr + 4;
+		}
+	}
+
+exit_lockless_read:
+	return ret_val;
+}
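+
+/* Sector-crossing sketch (the concrete QLA8044_FLASH_SECTOR_SIZE value is
+ * an assumption here, e.g. 64 KB): a read of 8 words that starts 8 bytes
+ * before a sector boundary rewrites FLASH_DIRECT_WINDOW once, after the
+ * second word, when flash_offset wraps past the boundary; a read that
+ * fits inside one sector takes the single-window path instead.
+ */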
+
+/*
+ * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
+ *
+ * @vha : Pointer to adapter structure
+ * @addr : MS memory address to write to (must be 128-bit aligned)
+ * @data : Data to be written
+ * @count : Number of 128-bit (16-byte) words to write
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
+	uint64_t addr, uint32_t *data, uint32_t count)
+{
+	int i, j, ret_val = QLA_SUCCESS;
+	uint32_t agt_ctrl;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Only 128-bit aligned access */
+	if (addr & 0xF) {
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_ms_mem_write;
+	}
+	write_lock_irqsave(&ha->hw_lock, flags);
+
+	/* Write address */
+	ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		ql_log(ql_log_fatal, vha, 0xb0a1,
+		    "%s: write to AGT_ADDR_HI failed!\n", __func__);
+		goto exit_ms_mem_write_unlock;
+	}
+
+	for (i = 0; i < count; i++, addr += 16) {
+		if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
+		    QLA8044_ADDR_QDR_NET_MAX)) ||
+		    (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
+			QLA8044_ADDR_DDR_NET_MAX)))) {
+			ret_val = QLA_FUNCTION_FAILED;
+			goto exit_ms_mem_write_unlock;
+		}
+
+		ret_val = qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_ADDR_LO, addr);
+
+		/* Write data */
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_LO, *data++);
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_HI, *data++);
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
+		ret_val += qla8044_wr_reg_indirect(vha,
+		    MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
+		if (ret_val == QLA_FUNCTION_FAILED) {
+			ql_log(ql_log_fatal, vha, 0xb0a2,
+			    "%s: write to AGT_WRDATA failed!\n",
+			    __func__);
+			goto exit_ms_mem_write_unlock;
+		}
+
+		/* Check write status */
+		ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+		    MIU_TA_CTL_WRITE_ENABLE);
+		ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+		    MIU_TA_CTL_WRITE_START);
+		if (ret_val == QLA_FUNCTION_FAILED) {
+			ql_log(ql_log_fatal, vha, 0xb0a3,
+			    "%s: write to AGT_CTRL failed!\n", __func__);
+			goto exit_ms_mem_write_unlock;
+		}
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			ret_val = qla8044_rd_reg_indirect(vha,
+			    MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
+			if (ret_val == QLA_FUNCTION_FAILED) {
+				ql_log(ql_log_fatal, vha, 0xb0a4,
+				    "%s: failed to read "
+				    "MD_MIU_TEST_AGT_CTRL!\n", __func__);
+				goto exit_ms_mem_write_unlock;
+			}
+			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		/* Status check failed */
+		if (j >= MAX_CTL_CHECK) {
+			ql_log(ql_log_fatal, vha, 0xb0a5,
+			    "%s: MS memory write failed!\n",
+			   __func__);
+			ret_val = QLA_FUNCTION_FAILED;
+			goto exit_ms_mem_write_unlock;
+		}
+	}
+
+exit_ms_mem_write_unlock:
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+	return ret_val;
+}
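+
+/* Worked example (illustrative): writing a 64-byte buffer takes
+ * count = 64 / 16 = 4 passes; each pass consumes four 32-bit words from
+ * *data (WRDATA_LO/HI/ULO/UHI), kicks the test agent with WRITE_ENABLE
+ * then WRITE_START, and polls up to MAX_CTL_CHECK times for
+ * MIU_TA_CTL_BUSY to clear before moving on to addr + 16.
+ */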
+
+static int
+qla8044_copy_bootloader(struct scsi_qla_host *vha)
+{
+	uint8_t *p_cache;
+	uint32_t src, count, size;
+	uint64_t dest;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	src = QLA8044_BOOTLOADER_FLASH_ADDR;
+	dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
+	size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
+
+	/* 128 bit alignment check */
+	if (size & 0xF)
+		size = (size + 16) & ~0xF;
+
+	/* 16 byte count */
+	count = size/16;
+
+	p_cache = vmalloc(size);
+	if (p_cache == NULL) {
+		ql_log(ql_log_fatal, vha, 0xb0a6,
+		    "%s: Failed to allocate memory for "
+		    "boot loader cache\n", __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_copy_bootloader;
+	}
+
+	ret_val = qla8044_lockless_flash_read_u32(vha, src,
+	    p_cache, size/sizeof(uint32_t));
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		ql_log(ql_log_fatal, vha, 0xb0a7,
+		    "%s: Error reading F/W from flash!!!\n", __func__);
+		goto exit_copy_error;
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
+	    __func__);
+
+	/* 128 bit/16 byte write to MS memory */
+	ret_val = qla8044_ms_mem_write_128b(vha, dest,
+	    (uint32_t *)p_cache, count);
+	if (ret_val == QLA_FUNCTION_FAILED) {
+		ql_log(ql_log_fatal, vha, 0xb0a9,
+		    "%s: Error writing F/W to MS !!!\n", __func__);
+		goto exit_copy_error;
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
+	    "%s: Wrote F/W (size %d) to MS !!!\n",
+	    __func__, size);
+
+exit_copy_error:
+	vfree(p_cache);
+
+exit_copy_bootloader:
+	return ret_val;
+}
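+
+/* Sizing sketch (hypothetical numbers): a bootloader of 0x1FF8 bytes is
+ * rounded up to the next 16-byte multiple, 0x2000, giving count =
+ * 0x2000 / 16 = 0x200 chunks for the 128-bit MS-memory writes and
+ * 0x2000 / 4 = 0x800 words for the flash read.
+ */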
+
+static int
+qla8044_restart(struct scsi_qla_host *vha)
+{
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla8044_process_stop_seq(vha);
+
+	/* Collect minidump */
+	if (ql2xmdenable)
+		qla8044_get_minidump(vha);
+	else
+		ql_log(ql_log_fatal, vha, 0xb14c,
+		    "Minidump disabled.\n");
+
+	qla8044_process_init_seq(vha);
+
+	if (qla8044_copy_bootloader(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0ab,
+		    "%s: Copy bootloader, firmware restart failed!\n",
+		    __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_restart;
+	}
+
+	/*
+	 *  Loads F/W from flash
+	 */
+	qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
+
+	qla8044_process_start_seq(vha);
+
+exit_restart:
+	return ret_val;
+}
+
+/*
+ * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
+ * initialized.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
+{
+	uint32_t val, ret_val = QLA_FUNCTION_FAILED;
+	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+	struct qla_hw_data *ha = vha->hw;
+
+	do {
+		val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
+		if (val == PHAN_INITIALIZE_COMPLETE) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
+			    "%s: Command Peg initialization "
+			    "complete! state=0x%x\n", __func__, val);
+			ret_val = QLA_SUCCESS;
+			break;
+		}
+		msleep(CRB_CMDPEG_CHECK_DELAY);
+	} while (--retries);
+
+	return ret_val;
+}
+
+static int
+qla8044_start_firmware(struct scsi_qla_host *vha)
+{
+	int ret_val = QLA_SUCCESS;
+
+	if (qla8044_restart(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0ad,
+		    "%s: Restart Error!!!, Need Reset!!!\n",
+		    __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_start_fw;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0af,
+	    "%s: Restart done!\n", __func__);
+
+	ret_val = qla8044_check_cmd_peg_status(vha);
+	if (ret_val) {
+		ql_log(ql_log_fatal, vha, 0xb0b0,
+		    "%s: Peg not initialized!\n", __func__);
+		ret_val = QLA_FUNCTION_FAILED;
+	}
+
+exit_start_fw:
+	return ret_val;
+}
+
+void
+qla8044_clear_drv_active(struct qla_hw_data *ha)
+{
+	uint32_t drv_active;
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	drv_active &= ~(1 << (ha->portnum));
+
+	ql_log(ql_log_info, vha, 0xb0b1,
+	    "%s(%ld): drv_active: 0x%08x\n",
+	    __func__, vha->host_no, drv_active);
+
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+/*
+ * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static int
+qla8044_device_bootstrap(struct scsi_qla_host *vha)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	int i;
+	uint32_t old_count = 0, count = 0;
+	int need_reset = 0;
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	need_reset = qla8044_need_reset(vha);
+
+	if (!need_reset) {
+		old_count = qla8044_rd_direct(vha,
+		    QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+		for (i = 0; i < 10; i++) {
+			msleep(200);
+
+			count = qla8044_rd_direct(vha,
+			    QLA8044_PEG_ALIVE_COUNTER_INDEX);
+			if (count != old_count) {
+				rval = QLA_SUCCESS;
+				goto dev_ready;
+			}
+		}
+		qla8044_flash_lock_recovery(vha);
+	} else {
+		/* We are trying to perform a recovery here. */
+		if (ha->flags.isp82xx_fw_hung)
+			qla8044_flash_lock_recovery(vha);
+	}
+
+	/* set to DEV_INITIALIZING */
+	ql_log(ql_log_info, vha, 0xb0b2,
+	    "%s: HW State: INITIALIZING\n", __func__);
+	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+	    QLA8XXX_DEV_INITIALIZING);
+
+	qla8044_idc_unlock(ha);
+	rval = qla8044_start_firmware(vha);
+	qla8044_idc_lock(ha);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0xb0b3,
+		     "%s: HW State: FAILED\n", __func__);
+		qla8044_clear_drv_active(ha);
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_FAILED);
+		return rval;
+	}
+
+	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, clear it after
+	 * the device goes to INIT state. */
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+		ha->fw_dumped = 0;
+	}
+
+dev_ready:
+	ql_log(ql_log_info, vha, 0xb0b4,
+	    "%s: HW State: READY\n", __func__);
+	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
+
+	return rval;
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+static void
+qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
+{
+	u8 *phdr;
+
+	if (!vha->reset_tmplt.buff) {
+		ql_log(ql_log_fatal, vha, 0xb0b5,
+		    "%s: Error Invalid reset_seq_template\n", __func__);
+		return;
+	}
+
+	phdr = vha->reset_tmplt.buff;
+	ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
+	    "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X"
+	    "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
+	    "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
+	    *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+	    *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+	    *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+	    *(phdr+13), *(phdr+14), *(phdr+15));
+}
+
+/*
+ * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
+{
+	uint32_t sum =  0;
+	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
+	int u16_count =  vha->reset_tmplt.hdr->size / sizeof(uint16_t);
+
+	while (u16_count-- > 0)
+		sum += *buff++;
+
+	while (sum >> 16)
+		sum = (sum & 0xFFFF) +  (sum >> 16);
+
+	/* checksum of 0 indicates a valid template */
+	if (~sum) {
+		return QLA_SUCCESS;
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb0b7,
+		    "%s: Reset seq checksum failed\n", __func__);
+		return QLA_FUNCTION_FAILED;
+	}
+}
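+
+/* Worked example of the 16-bit fold above (illustrative): two words
+ * 0x8001 and 0x8002 sum to 0x10003, and the fold turns that into
+ * 0x0003 + 0x0001 = 0x0004.  A template whose stored checksum word is
+ * the one's complement of the rest folds to an all-ones sum instead.
+ */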
+
+/*
+ * qla8044_read_reset_template - Read the reset template from flash, validate
+ * it and store the stop/start/init sequence offsets in vha->reset_tmplt.
+ *
+ * @ha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+	uint8_t *p_buff;
+	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+	vha->reset_tmplt.seq_error = 0;
+	vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+	if (vha->reset_tmplt.buff == NULL) {
+		ql_log(ql_log_fatal, vha, 0xb0b8,
+		    "%s: Failed to allocate reset template resources\n",
+		    __func__);
+		goto exit_read_reset_template;
+	}
+
+	p_buff = vha->reset_tmplt.buff;
+	addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+	tmplt_hdr_def_size =
+	    sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+	    "%s: Read template hdr size %d from Flash\n",
+	    __func__, tmplt_hdr_def_size);
+
+	/* Copy template header from flash */
+	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+		ql_log(ql_log_fatal, vha, 0xb0ba,
+		    "%s: Failed to read reset template\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	vha->reset_tmplt.hdr =
+	    (struct qla8044_reset_template_hdr *)vha->reset_tmplt.buff;
+
+	/* Validate the template header size and signature */
+	tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+	    (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+		ql_log(ql_log_fatal, vha, 0xb0bb,
+		    "%s: Template Header size invalid %d "
+		    "tmplt_hdr_def_size %d!!!\n", __func__,
+		    tmplt_hdr_size, tmplt_hdr_def_size);
+		goto exit_read_template_error;
+	}
+
+	addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+	p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+	tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+	    vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+	    "%s: Read rest of the template size %d\n",
+	    __func__, vha->reset_tmplt.hdr->size);
+
+	/* Copy rest of the template */
+	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+		ql_log(ql_log_fatal, vha, 0xb0bd,
+		    "%s: Failed to read reset template\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	/* Integrity check */
+	if (qla8044_reset_seq_checksum_test(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0be,
+		    "%s: Reset Seq checksum failed!\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+	    "%s: Reset Seq checksum passed! Get stop, "
+	    "start and init seq offsets\n", __func__);
+
+	/* Get STOP, START, INIT sequence offsets */
+	vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
+	    vha->reset_tmplt.hdr->init_seq_offset;
+
+	vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
+	    vha->reset_tmplt.hdr->start_seq_offset;
+
+	vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
+	    vha->reset_tmplt.hdr->hdr_size;
+
+	qla8044_dump_reset_seq_hdr(vha);
+
+	goto exit_read_reset_template;
+
+exit_read_template_error:
+	vfree(vha->reset_tmplt.buff);
+
+exit_read_reset_template:
+	return;
+}
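+
+/* Template layout as recovered above (per the header fields used): the
+ * STOP sequence starts right after the header at buff + hdr_size, while
+ * the INIT and START sequences live at init_seq_offset and
+ * start_seq_offset from the start of the template buffer; each
+ * sub-sequence ends in an OPCODE_SEQ_END entry, with OPCODE_TMPL_END
+ * closing the START sequence.
+ */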
+
+void
+qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
+{
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	idc_ctrl |= DONTRESET_BIT0;
+	ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
+	    "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
+	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+static inline void
+qla8044_set_rst_ready(struct scsi_qla_host *vha)
+{
+	uint32_t drv_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	/* For ISP8044, the drv_state register has 1 bit per function:
+	 * shift 1 by func_num to set the bit for this function. */
+	drv_state |= (1 << ha->portnum);
+
+	ql_log(ql_log_info, vha, 0xb0c1,
+	    "%s(%ld): drv_state: 0x%08x\n",
+	    __func__, vha->host_no, drv_state);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+/**
+ * qla8044_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla8044_need_reset_handler(struct scsi_qla_host *vha)
+{
+	uint32_t dev_state = 0, drv_state, drv_active;
+	unsigned long reset_timeout;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_log(ql_log_fatal, vha, 0xb0c2,
+	    "%s: Performing ISP error recovery\n", __func__);
+
+	if (vha->flags.online) {
+		qla8044_idc_unlock(ha);
+		qla2x00_abort_isp_cleanup(vha);
+		ha->isp_ops->get_flash_version(vha, vha->req->ring);
+		ha->isp_ops->nvram_config(vha);
+		qla8044_idc_lock(ha);
+	}
+
+	dev_state = qla8044_rd_direct(vha,
+	    QLA8044_CRB_DEV_STATE_INDEX);
+	drv_state = qla8044_rd_direct(vha,
+	    QLA8044_CRB_DRV_STATE_INDEX);
+	drv_active = qla8044_rd_direct(vha,
+	    QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	ql_log(ql_log_info, vha, 0xb0c5,
+	    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
+	    __func__, vha->host_no, drv_state, drv_active, dev_state);
+
+	qla8044_set_rst_ready(vha);
+
+	/* wait for 10 seconds for reset ack from all functions */
+	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+	do {
+		if (time_after_eq(jiffies, reset_timeout)) {
+			ql_log(ql_log_info, vha, 0xb0c4,
+			    "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
+			    __func__, ha->portnum, drv_state, drv_active);
+			break;
+		}
+
+		qla8044_idc_unlock(ha);
+		msleep(1000);
+		qla8044_idc_lock(ha);
+
+		dev_state = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DEV_STATE_INDEX);
+		drv_state = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_STATE_INDEX);
+		drv_active = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_ACTIVE_INDEX);
+	} while (((drv_state & drv_active) != drv_active) &&
+	    (dev_state == QLA8XXX_DEV_NEED_RESET));
+
+	/* Remove IDC participation of functions not acknowledging */
+	if (drv_state != drv_active) {
+		ql_log(ql_log_info, vha, 0xb0c7,
+		    "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
+		    __func__, vha->host_no, ha->portnum,
+		    (drv_active ^ drv_state));
+		drv_active = drv_active & drv_state;
+		qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+		    drv_active);
+	} else {
+		/*
+		 * Reset owner should execute reset recovery,
+		 * if all functions acknowledged
+		 */
+		if ((ha->flags.nic_core_reset_owner) &&
+		    (dev_state == QLA8XXX_DEV_NEED_RESET)) {
+			ha->flags.nic_core_reset_owner = 0;
+			qla8044_device_bootstrap(vha);
+			return;
+		}
+	}
+
+	/* Exit if non active function */
+	if (!(drv_active & (1 << ha->portnum))) {
+		ha->flags.nic_core_reset_owner = 0;
+		return;
+	}
+
+	/*
+	 * Execute Reset Recovery if Reset Owner or Function 7
+	 * is the only active function
+	 */
+	if (ha->flags.nic_core_reset_owner ||
+	    ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
+		ha->flags.nic_core_reset_owner = 0;
+		qla8044_device_bootstrap(vha);
+	}
+}
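+
+/* Ack bookkeeping example (illustrative): with drv_active = 0x0d
+ * (functions 0, 2 and 3 loaded) and drv_state = 0x05 after the timeout,
+ * function 3 never set its rst_ready bit; drv_active ^ drv_state = 0x08
+ * names it, and drv_active is rewritten to 0x05 to drop it from IDC
+ * participation.
+ */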
+
+static void
+qla8044_set_drv_active(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* For ISP8044, drv_active register has 1 bit per function,
+	 * shift 1 by func_num to set a bit for the function.*/
+	drv_active |= (1 << ha->portnum);
+
+	ql_log(ql_log_info, vha, 0xb0c8,
+	    "%s(%ld): drv_active: 0x%08x\n",
+	    __func__, vha->host_no, drv_active);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+static int
+qla8044_check_drv_active(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	if (drv_active & (1 << ha->portnum))
+		return QLA_SUCCESS;
+	else
+		return QLA_TEST_FAILED;
+}
+
+static void
+qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
+{
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	idc_ctrl &= ~DONTRESET_BIT0;
+	ql_log(ql_log_info, vha, 0xb0c9,
+	    "%s: idc_ctrl = %d\n", __func__,
+	    idc_ctrl);
+	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+static int
+qla8044_set_idc_ver(struct scsi_qla_host *vha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	if (drv_active == (1 << ha->portnum)) {
+		idc_ver = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+		idc_ver &= (~0xFF);
+		idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
+		qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+		    idc_ver);
+		ql_log(ql_log_info, vha, 0xb0ca,
+		    "%s: IDC version updated to %d\n",
+		    __func__, idc_ver);
+	} else {
+		idc_ver = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+		idc_ver &= 0xFF;
+		if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
+			ql_log(ql_log_info, vha, 0xb0cb,
+			    "%s: qla4xxx driver IDC version %d "
+			    "is not compatible with IDC version %d "
+			    "of other drivers!\n",
+			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
+			    idc_ver);
+			rval = QLA_FUNCTION_FAILED;
+			goto exit_set_idc_ver;
+		}
+	}
+
+	/* Update IDC_MINOR_VERSION */
+	idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
+	idc_ver &= ~(0x03 << (ha->portnum * 2));
+	idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
+	qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+	return rval;
+}
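+
+/* Minor-version packing example (illustrative): each function owns a
+ * 2-bit field at bits (portnum * 2 + 1)..(portnum * 2), so function 3
+ * clears and re-writes bits 7..6 of IDC_VER_MINOR while leaving the
+ * other functions' fields untouched.
+ */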
+
+static int
+qla8044_update_idc_reg(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.init_done)
+		goto exit_update_idc_reg;
+
+	qla8044_idc_lock(ha);
+	qla8044_set_drv_active(vha);
+
+	drv_active = qla8044_rd_direct(vha,
+	    QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* If we are the first driver to load and
+	 * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
+	if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
+		qla8044_clear_idc_dontreset(vha);
+
+	rval = qla8044_set_idc_ver(vha);
+	if (rval == QLA_FUNCTION_FAILED)
+		qla8044_clear_drv_active(ha);
+	qla8044_idc_unlock(ha);
+
+exit_update_idc_reg:
+	return rval;
+}
+
+/**
+ * qla8044_need_qsnt_handler - Code to start qsnt
+ * @ha: pointer to adapter structure
+ **/
+static void
+qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
+{
+	unsigned long qsnt_timeout;
+	uint32_t drv_state, drv_active, dev_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!vha->flags.online)
+		return;
+
+	qla2x00_quiesce_io(vha);
+
+	qla8044_set_qsnt_ready(vha);
+
+	/* Wait for 30 secs for all functions to ack qsnt mode */
+	qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* Shift drv_active by 1 to match drv_state, as the quiescent-ready
+	 * bit is at bit 1 while the drv-active bit is at bit 0. */
+	drv_active = drv_active << 1;
+
+	while (drv_state != drv_active) {
+		if (time_after_eq(jiffies, qsnt_timeout)) {
+			/* Other functions did not ack, changing state to
+			 * DEV_READY
+			 */
+			clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+					    QLA8XXX_DEV_READY);
+			qla8044_clear_qsnt_ready(vha);
+			ql_log(ql_log_info, vha, 0xb0cc,
+			    "Timeout waiting for quiescent ack!!!\n");
+			return;
+		}
+		qla8044_idc_unlock(ha);
+		msleep(1000);
+		qla8044_idc_lock(ha);
+
+		drv_state = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_STATE_INDEX);
+		drv_active = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_ACTIVE_INDEX);
+		drv_active = drv_active << 1;
+	}
+
+	/* All functions have Acked. Set quiescent state */
+	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+	if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_QUIESCENT);
+		ql_log(ql_log_info, vha, 0xb0cd,
+		    "%s: HW State: QUIESCENT\n", __func__);
+	}
+}
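+
+/* Alignment example (illustrative): with functions 0 and 2 active,
+ * drv_active = 0x05 shifts to 0x0a, which equals drv_state once both
+ * functions have set their quiescent-ready bits, each of which sits one
+ * position above the corresponding drv_active bit.
+ */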
+
+/*
+ * qla8044_device_state_handler - Adapter state machine
+ * @ha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int
+qla8044_device_state_handler(struct scsi_qla_host *vha)
+{
+	uint32_t dev_state;
+	int rval = QLA_SUCCESS;
+	unsigned long dev_init_timeout;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = qla8044_update_idc_reg(vha);
+	if (rval == QLA_FUNCTION_FAILED)
+		goto exit_error;
+
+	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+	ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
+	    "Device state is 0x%x = %s\n",
+	    dev_state, dev_state < MAX_STATES ?
+	    qdev_state(dev_state) : "Unknown");
+
+	/* wait for 30 seconds for device to go ready */
+	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+	qla8044_idc_lock(ha);
+
+	while (1) {
+		if (time_after_eq(jiffies, dev_init_timeout)) {
+			if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0xb0cf,
+				    "%s: Device Init Failed 0x%x = %s\n",
+				    QLA2XXX_DRIVER_NAME, dev_state,
+				    dev_state < MAX_STATES ?
+				    qdev_state(dev_state) : "Unknown");
+				qla8044_wr_direct(vha,
+				    QLA8044_CRB_DEV_STATE_INDEX,
+				    QLA8XXX_DEV_FAILED);
+			}
+		}
+
+		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+		ql_log(ql_log_info, vha, 0xb0d0,
+		    "Device state is 0x%x = %s\n",
+		    dev_state, dev_state < MAX_STATES ?
+		    qdev_state(dev_state) : "Unknown");
+
+		/* NOTE: Make sure idc unlocked upon exit of switch statement */
+		switch (dev_state) {
+		case QLA8XXX_DEV_READY:
+			ha->flags.nic_core_reset_owner = 0;
+			goto exit;
+		case QLA8XXX_DEV_COLD:
+			rval = qla8044_device_bootstrap(vha);
+			break;
+		case QLA8XXX_DEV_INITIALIZING:
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+			break;
+		case QLA8XXX_DEV_NEED_RESET:
+			/* For ISP8044, if NEED_RESET is set by any driver,
+			 * it should be honored, irrespective of IDC_CTRL
+			 * DONTRESET_BIT0 */
+			qla8044_need_reset_handler(vha);
+			break;
+		case QLA8XXX_DEV_NEED_QUIESCENT:
+			/* idc locked/unlocked in handler */
+			qla8044_need_qsnt_handler(vha);
+
+			/* Reset the init timeout after qsnt handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_reset_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_QUIESCENT:
+			ql_log(ql_log_info, vha, 0xb0d1,
+			    "HW State: QUIESCENT\n");
+
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+
+			/* Reset the init timeout after qsnt handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_reset_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_FAILED:
+			ha->flags.nic_core_reset_owner = 0;
+			qla8044_idc_unlock(ha);
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla8044_idc_lock(ha);
+			goto exit;
+		default:
+			qla8044_idc_unlock(ha);
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla8044_idc_lock(ha);
+			goto exit;
+		}
+	}
+exit:
+	qla8044_idc_unlock(ha);
+
+exit_error:
+	return rval;
+}
+
+/**
+ * qla8044_check_temp - Check the ISP8044 temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+	uint32_t temp, temp_state, temp_val;
+	int status = QLA_SUCCESS;
+
+	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql_log(ql_log_warn, vha, 0xb0d2,
+		    "Device temperature %d degrees C"
+		    " exceeds maximum allowed. Hardware has been shut"
+		    " down\n", temp_val);
+		status = QLA_FUNCTION_FAILED;
+		return status;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		ql_log(ql_log_warn, vha, 0xb0d3,
+		    "Device temperature %d"
+		    " degrees C exceeds operating range."
+		    " Immediate action needed.\n", temp_val);
+	}
+	return 0;
+}
+
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+	uint32_t temp;
+
+	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+	return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive  - Check firmware health
+ * @ha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+int
+qla8044_check_fw_alive(struct scsi_qla_host *vha)
+{
+	uint32_t fw_heartbeat_counter;
+	uint32_t halt_status1, halt_status2;
+	int status = QLA_SUCCESS;
+
+	fw_heartbeat_counter = qla8044_rd_direct(vha,
+	    QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+	if (fw_heartbeat_counter == 0xffffffff) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
+		    "scsi%ld: %s: Device in frozen "
+		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+		    vha->host_no, __func__);
+		return status;
+	}
+
+	if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+		vha->seconds_since_last_heartbeat++;
+		/* FW not alive after 2 seconds */
+		if (vha->seconds_since_last_heartbeat == 2) {
+			vha->seconds_since_last_heartbeat = 0;
+			halt_status1 = qla8044_rd_direct(vha,
+			    QLA8044_PEG_HALT_STATUS1_INDEX);
+			halt_status2 = qla8044_rd_direct(vha,
+			    QLA8044_PEG_HALT_STATUS2_INDEX);
+
+			ql_log(ql_log_info, vha, 0xb0d5,
+			    "scsi(%ld): %s, ISP8044 "
+			    "Dumping hw/fw registers:\n"
+			    " PEG_HALT_STATUS1: 0x%x, "
+			    "PEG_HALT_STATUS2: 0x%x,\n",
+			    vha->host_no, __func__, halt_status1,
+			    halt_status2);
+			status = QLA_FUNCTION_FAILED;
+		}
+	} else {
+		vha->seconds_since_last_heartbeat = 0;
+	}
+
+	vha->fw_heartbeat_counter = fw_heartbeat_counter;
+	return status;
+}
+
+void
+qla8044_watchdog(struct scsi_qla_host *vha)
+{
+	uint32_t dev_state, halt_status;
+	int halt_status_unrecoverable = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* don't poll if reset is going on or FW hang in quiescent state */
+	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
+		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+		if (qla8044_check_fw_alive(vha)) {
+			ha->flags.isp82xx_fw_hung = 1;
+			ql_log(ql_log_warn, vha, 0xb10a,
+			    "Firmware hung.\n");
+			qla82xx_clear_pending_mbx(vha);
+		}
+
+		if (qla8044_check_temp(vha)) {
+			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+			ha->flags.isp82xx_fw_hung = 1;
+			qla2xxx_wake_dpc(vha);
+		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+			   !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+			ql_log(ql_log_info, vha, 0xb0d6,
+			    "%s: HW State: NEED RESET!\n",
+			    __func__);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+		    !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+			ql_log(ql_log_info, vha, 0xb0d7,
+			    "%s: HW State: NEED QUIES detected!\n",
+			    __func__);
+			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		} else {
+			/* Check firmware health */
+			if (ha->flags.isp82xx_fw_hung) {
+				halt_status = qla8044_rd_direct(vha,
+					QLA8044_PEG_HALT_STATUS1_INDEX);
+				if (halt_status &
+				    QLA8044_HALT_STATUS_FW_RESET) {
+					ql_log(ql_log_fatal, vha,
+					    0xb0d8, "%s: Firmware "
+					    "error detected device "
+					    "is being reset\n",
+					    __func__);
+				} else if (halt_status &
+				    QLA8044_HALT_STATUS_UNRECOVERABLE) {
+					halt_status_unrecoverable = 1;
+				}
+
+				/* Since we cannot change dev_state in
+				 * interrupt context, set the appropriate DPC
+				 * flag then wake up the DPC. */
+				if (halt_status_unrecoverable) {
+					set_bit(ISP_UNRECOVERABLE,
+					    &vha->dpc_flags);
+				} else {
+					if (dev_state ==
+					    QLA8XXX_DEV_QUIESCENT) {
+						set_bit(FCOE_CTX_RESET_NEEDED,
+						    &vha->dpc_flags);
+						ql_log(ql_log_info, vha, 0xb0d9,
+						    "%s: FW CONTEXT Reset "
+						    "needed!\n", __func__);
+					} else {
+						ql_log(ql_log_info, vha,
+						    0xb0da, "%s: "
+						    "detect abort needed\n",
+						    __func__);
+						set_bit(ISP_ABORT_NEEDED,
+						    &vha->dpc_flags);
+					}
+				}
+				qla2xxx_wake_dpc(vha);
+			}
+		}
+
+	}
+}
+
+static int
+qla8044_minidump_process_control(struct scsi_qla_host *vha,
+				 struct qla8044_minidump_entry_hdr *entry_hdr)
+{
+	struct qla8044_minidump_entry_crb *crb_entry;
+	uint32_t read_value, opcode, poll_time, addr, index;
+	uint32_t crb_addr, rval = QLA_SUCCESS;
+	unsigned long wtime;
+	struct qla8044_minidump_template_hdr *tmplt_hdr;
+	int i;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
+	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+		ha->md_tmplt_hdr;
+	crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
+
+	crb_addr = crb_entry->addr;
+	for (i = 0; i < crb_entry->op_count; i++) {
+		opcode = crb_entry->crb_ctrl.opcode;
+
+		if (opcode & QLA82XX_DBG_OPCODE_WR) {
+			qla8044_wr_reg_indirect(vha, crb_addr,
+			    crb_entry->value_1);
+			opcode &= ~QLA82XX_DBG_OPCODE_WR;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RW) {
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+			opcode &= ~QLA82XX_DBG_OPCODE_RW;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_AND) {
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+			read_value &= crb_entry->value_2;
+			opcode &= ~QLA82XX_DBG_OPCODE_AND;
+			if (opcode & QLA82XX_DBG_OPCODE_OR) {
+				read_value |= crb_entry->value_3;
+				opcode &= ~QLA82XX_DBG_OPCODE_OR;
+			}
+			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_OR) {
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+			read_value |= crb_entry->value_3;
+			qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+			opcode &= ~QLA82XX_DBG_OPCODE_OR;
+		}
+		if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+			poll_time = crb_entry->crb_strd.poll_timeout;
+			wtime = jiffies + poll_time;
+			qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+
+			do {
+				if ((read_value & crb_entry->value_2) ==
+				    crb_entry->value_1) {
+					break;
+				} else if (time_after_eq(jiffies, wtime)) {
+					/* capturing dump failed */
+					rval = QLA_FUNCTION_FAILED;
+					break;
+				} else {
+					qla8044_rd_reg_indirect(vha,
+					    crb_addr, &read_value);
+				}
+			} while (1);
+			opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			qla8044_rd_reg_indirect(vha, addr, &read_value);
+			index = crb_entry->crb_ctrl.state_index_v;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+			if (crb_entry->crb_strd.state_index_a) {
+				index = crb_entry->crb_strd.state_index_a;
+				addr = tmplt_hdr->saved_state_array[index];
+			} else {
+				addr = crb_addr;
+			}
+
+			if (crb_entry->crb_ctrl.state_index_v) {
+				index = crb_entry->crb_ctrl.state_index_v;
+				read_value =
+				    tmplt_hdr->saved_state_array[index];
+			} else {
+				read_value = crb_entry->value_1;
+			}
+
+			qla8044_wr_reg_indirect(vha, addr, read_value);
+			opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+		}
+
+		if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+			index = crb_entry->crb_ctrl.state_index_v;
+			read_value = tmplt_hdr->saved_state_array[index];
+			read_value <<= crb_entry->crb_ctrl.shl;
+			read_value >>= crb_entry->crb_ctrl.shr;
+			if (crb_entry->value_2)
+				read_value &= crb_entry->value_2;
+			read_value |= crb_entry->value_3;
+			read_value += crb_entry->value_1;
+			tmplt_hdr->saved_state_array[index] = read_value;
+			opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+		}
+		crb_addr += crb_entry->crb_strd.addr_stride;
+	}
+	return rval;
+}
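+
+/* Opcode handling note (as implemented above): crb_ctrl.opcode is a
+ * bitmask, so a single entry may combine operations; e.g. an opcode of
+ * (QLA82XX_DBG_OPCODE_WR | QLA82XX_DBG_OPCODE_POLL) first writes value_1
+ * to the CRB address, then polls until (value & value_2) == value_1 or
+ * poll_timeout expires, each handled bit being cleared from the local
+ * opcode copy as it completes.
+ */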
+
+static void
+qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla8044_minidump_entry_crb *crb_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
+	crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
+	r_addr = crb_hdr->addr;
+	r_stride = crb_hdr->crb_strd.addr_stride;
+	loop_cnt = crb_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+		*data_ptr++ = r_addr;
+		*data_ptr++ = r_value;
+		r_addr += r_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+static int
+qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_value, r_data;
+	uint32_t i, j, loop_cnt;
+	struct qla8044_minidump_entry_rdmem *m_hdr;
+	unsigned long flags;
+	uint32_t *data_ptr = *d_ptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
+	m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
+	r_addr = m_hdr->read_addr;
+	loop_cnt = m_hdr->read_data_size/16;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
+	    "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+	    __func__, r_addr, m_hdr->read_data_size);
+
+	if (r_addr & 0xf) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
+		    "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+		    __func__, r_addr);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	if (m_hdr->read_data_size % 16) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
+		    "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+		    __func__, m_hdr->read_data_size);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
+	    "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+	    __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+	write_lock_irqsave(&ha->hw_lock, flags);
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
+		r_value = 0;
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
+		r_value = MIU_TA_CTL_ENABLE;
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+		r_value = MIU_TA_CTL_START_ENABLE;
+		qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+		for (j = 0; j < MAX_CTL_CHECK; j++) {
+			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+			    &r_value);
+			if ((r_value & MIU_TA_CTL_BUSY) == 0)
+				break;
+		}
+
+		if (j >= MAX_CTL_CHECK) {
+			/* Test agent timed out; fail this dump entry */
+			write_unlock_irqrestore(&ha->hw_lock, flags);
+			return QLA_FUNCTION_FAILED;
+		}
+
+		for (j = 0; j < 4; j++) {
+			qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
+			    &r_data);
+			*data_ptr++ = r_data;
+		}
+
+		r_addr += 16;
+	}
+	write_unlock_irqrestore(&ha->hw_lock, flags);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
+	    "Leaving fn: %s datacount: 0x%x\n",
+	     __func__, (loop_cnt * 16));
+
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
+/* ISP83xx flash read for _RDROM _BOARD */
+static uint32_t
+qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t fl_addr, u32_count, rval;
+	struct qla8044_minidump_entry_rdrom *rom_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
+	fl_addr = rom_hdr->read_addr;
+	u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+	    __func__, fl_addr, u32_count);
+
+	rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
+	    (u8 *)(data_ptr), u32_count);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0xb0f6,
+		    "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
+		return QLA_FUNCTION_FAILED;
+	} else {
+		data_ptr += u32_count;
+		*d_ptr = data_ptr;
+		return QLA_SUCCESS;
+	}
+}
+
+static void
+qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, int index)
+{
+	entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+
+	ql_log(ql_log_info, vha, 0xb0f7,
+	    "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+	    vha->host_no, index, entry_hdr->entry_type,
+	    entry_hdr->d_ctrl.entry_capture_mask);
+}
+
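+/*
+ * Capture L2 cache data: for each tag, program the tag register,
+ * optionally kick the control register and poll it for completion,
+ * then read r_cnt words at the configured address stride.
+ */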
+static int
+qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+				 uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	unsigned long p_wait, w_time, p_mask;
+	uint32_t c_value_w, c_value_r;
+	struct qla8044_minidump_entry_cache *cache_hdr;
+	int rval = QLA_FUNCTION_FAILED;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
+	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+	p_wait = cache_hdr->cache_ctrl.poll_wait;
+	p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+	for (i = 0; i < loop_count; i++) {
+		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+		if (c_value_w)
+			qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+
+		if (p_mask) {
+			w_time = jiffies + p_wait;
+			do {
+				qla8044_rd_reg_indirect(vha, c_addr,
+				    &c_value_r);
+				if ((c_value_r & p_mask) == 0) {
+					break;
+				} else if (time_after_eq(jiffies, w_time)) {
+					/* capturing dump failed */
+					return rval;
+				}
+			} while (1);
+		}
+
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			qla8044_rd_reg_indirect(vha, addr, &r_value);
+			*data_ptr++ = r_value;
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+}
+
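+/*
+ * Capture L1 cache data: the same tag/control/read sequence as the
+ * L2 handler, but without polling the control register.
+ */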
+static void
+qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t addr, r_addr, c_addr, t_r_addr;
+	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+	uint32_t c_value_w;
+	struct qla8044_minidump_entry_cache *cache_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+	loop_count = cache_hdr->op_count;
+	r_addr = cache_hdr->read_addr;
+	c_addr = cache_hdr->control_addr;
+	c_value_w = cache_hdr->cache_ctrl.write_value;
+
+	t_r_addr = cache_hdr->tag_reg_addr;
+	t_value = cache_hdr->addr_ctrl.init_tag_value;
+	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+	for (i = 0; i < loop_count; i++) {
+		qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+		qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+		addr = r_addr;
+		for (k = 0; k < r_cnt; k++) {
+			qla8044_rd_reg_indirect(vha, addr, &r_value);
+			*data_ptr++ = r_value;
+			addr += cache_hdr->read_ctrl.read_addr_stride;
+		}
+		t_value += cache_hdr->addr_ctrl.tag_value_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
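+/*
+ * Capture on-chip memory (OCM): OCM is mapped through the PCI BAR,
+ * so it is read with readl() rather than the indirect register
+ * interface.
+ */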
+static void
+qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+	struct qla8044_minidump_entry_rdocm *ocm_hdr;
+	uint32_t *data_ptr = *d_ptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
+
+	ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
+	r_addr = ocm_hdr->read_addr;
+	r_stride = ocm_hdr->read_addr_stride;
+	loop_cnt = ocm_hdr->op_count;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
+	    "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+	    __func__, r_addr, r_stride, loop_cnt);
+
+	for (i = 0; i < loop_cnt; i++) {
+		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+		*data_ptr++ = r_value;
+		r_addr += r_stride;
+	}
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
+	    __func__, (unsigned long)(loop_cnt * sizeof(uint32_t)));
+
+	*d_ptr = data_ptr;
+}
+
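+/*
+ * Capture muxed registers: write each select value, read the data
+ * register behind the mux, and store the (select, data) pair.
+ */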
+static void
+qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+	struct qla8044_minidump_entry_mux *mux_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
+
+	mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
+	r_addr = mux_hdr->read_addr;
+	s_addr = mux_hdr->select_addr;
+	s_stride = mux_hdr->select_value_stride;
+	s_value = mux_hdr->select_value;
+	loop_cnt = mux_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_wr_reg_indirect(vha, s_addr, s_value);
+		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+		*data_ptr++ = s_value;
+		*data_ptr++ = r_value;
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
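+/*
+ * Capture per-queue state: select each queue id in turn and read
+ * r_cnt words from the queue's register window.
+ */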
+static void
+qla8044_minidump_process_queue(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t s_addr, r_addr;
+	uint32_t r_stride, r_value, r_cnt, qid = 0;
+	uint32_t i, k, loop_cnt;
+	struct qla8044_minidump_entry_queue *q_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
+	q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
+	s_addr = q_hdr->select_addr;
+	r_cnt = q_hdr->rd_strd.read_addr_cnt;
+	r_stride = q_hdr->rd_strd.read_addr_stride;
+	loop_cnt = q_hdr->op_count;
+
+	for (i = 0; i < loop_cnt; i++) {
+		qla8044_wr_reg_indirect(vha, s_addr, qid);
+		r_addr = q_hdr->read_addr;
+		for (k = 0; k < r_cnt; k++) {
+			qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+			*data_ptr++ = r_value;
+			r_addr += r_stride;
+		}
+		qid += q_hdr->q_strd.queue_id_stride;
+	}
+	*d_ptr = data_ptr;
+}
+
+/* ISP83xx functions to process new minidump entries... */
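+/*
+ * Poll-then-read capture: program each select value, poll the select
+ * address until the mask bits are set (sleeping ~1 ms per attempt),
+ * then read the data register and store the (select, data) pair.
+ */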
+static uint32_t
+qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+	uint16_t s_stride, i;
+	struct qla8044_minidump_entry_pollrd *pollrd_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
+	s_addr = pollrd_hdr->select_addr;
+	r_addr = pollrd_hdr->read_addr;
+	s_value = pollrd_hdr->select_value;
+	s_stride = pollrd_hdr->select_value_stride;
+
+	poll_wait = pollrd_hdr->poll_wait;
+	poll_mask = pollrd_hdr->poll_mask;
+
+	for (i = 0; i < pollrd_hdr->op_count; i++) {
+		qla8044_wr_reg_indirect(vha, s_addr, s_value);
+		poll_wait = pollrd_hdr->poll_wait;
+		while (1) {
+			qla8044_rd_reg_indirect(vha, s_addr, &r_value);
+			if ((r_value & poll_mask) != 0) {
+				break;
+			} else {
+				usleep_range(1000, 1100);
+				if (--poll_wait == 0) {
+					ql_log(ql_log_fatal, vha, 0xb0fe,
+					    "%s: TIMEOUT\n", __func__);
+					goto error;
+				}
+			}
+		}
+		qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+		*data_ptr++ = s_value;
+		*data_ptr++ = r_value;
+
+		s_value += s_stride;
+	}
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+
+error:
+	return QLA_FUNCTION_FAILED;
+}
+
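+/*
+ * Capture a two-stage mux: each iteration applies two select values
+ * through the first mux, routes the masked result through the second
+ * select address, and stores both (select, data) pairs.
+ */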
+static void
+qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+	uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+	struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
+	sel_val1 = rdmux2_hdr->select_value_1;
+	sel_val2 = rdmux2_hdr->select_value_2;
+	sel_addr1 = rdmux2_hdr->select_addr_1;
+	sel_addr2 = rdmux2_hdr->select_addr_2;
+	sel_val_mask = rdmux2_hdr->select_value_mask;
+	read_addr = rdmux2_hdr->read_addr;
+
+	for (i = 0; i < rdmux2_hdr->op_count; i++) {
+		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
+		t_sel_val = sel_val1 & sel_val_mask;
+		*data_ptr++ = t_sel_val;
+
+		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+		qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+		*data_ptr++ = data;
+
+		qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
+		t_sel_val = sel_val2 & sel_val_mask;
+		*data_ptr++ = t_sel_val;
+
+		qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+		qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+		*data_ptr++ = data;
+
+		sel_val1 += rdmux2_hdr->select_value_stride;
+		sel_val2 += rdmux2_hdr->select_value_stride;
+	}
+
+	*d_ptr = data_ptr;
+}
+
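+/*
+ * Poll/read-modify-write capture: arm via addr_1 and poll for the
+ * ready mask, read-modify-write the data register at addr_2, re-arm,
+ * poll again, and store the final (address, data) pair.
+ */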
+static uint32_t
+qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr,
+	uint32_t **d_ptr)
+{
+	uint32_t poll_wait, poll_mask, r_value, data;
+	uint32_t addr_1, addr_2, value_1, value_2;
+	struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
+	uint32_t *data_ptr = *d_ptr;
+
+	poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
+	addr_1 = poll_hdr->addr_1;
+	addr_2 = poll_hdr->addr_2;
+	value_1 = poll_hdr->value_1;
+	value_2 = poll_hdr->value_2;
+	poll_mask = poll_hdr->poll_mask;
+
+	qla8044_wr_reg_indirect(vha, addr_1, value_1);
+
+	poll_wait = poll_hdr->poll_wait;
+	while (1) {
+		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+		if ((r_value & poll_mask) != 0) {
+			break;
+		} else {
+			usleep_range(1000, 1100);
+			if (--poll_wait == 0) {
+				ql_log(ql_log_fatal, vha, 0xb0ff,
+				    "%s: TIMEOUT\n", __func__);
+				goto error;
+			}
+		}
+	}
+
+	qla8044_rd_reg_indirect(vha, addr_2, &data);
+	data &= poll_hdr->modify_mask;
+	qla8044_wr_reg_indirect(vha, addr_2, data);
+	qla8044_wr_reg_indirect(vha, addr_1, value_2);
+
+	poll_wait = poll_hdr->poll_wait;
+	while (1) {
+		qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+		if ((r_value & poll_mask) != 0) {
+			break;
+		} else {
+			usleep_range(1000, 1100);
+			if (--poll_wait == 0) {
+				ql_log(ql_log_fatal, vha, 0xb100,
+				    "%s: TIMEOUT2\n", __func__);
+				goto error;
+			}
+		}
+	}
+
+	*data_ptr++ = addr_2;
+	*data_ptr++ = data;
+
+	*d_ptr = data_ptr;
+
+	return QLA_SUCCESS;
+
+error:
+	return QLA_FUNCTION_FAILED;
+}
+
+#define ISP8044_PEX_DMA_ENGINE_INDEX		8
+#define ISP8044_PEX_DMA_BASE_ADDRESS		0x77320000
+#define ISP8044_PEX_DMA_NUM_OFFSET		0x10000
+#define ISP8044_PEX_DMA_CMD_ADDR_LOW		0x0
+#define ISP8044_PEX_DMA_CMD_ADDR_HIGH		0x04
+#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL	0x08
+
+#define ISP8044_PEX_DMA_READ_SIZE	(16 * 1024)
+#define ISP8044_PEX_DMA_MAX_WAIT	(100 * 100) /* Max wait of 100 msecs */
+
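+/*
+ * The minidump template publishes the pex-dma engine number in
+ * saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; BIT_31 of that
+ * engine's command-status-and-control register indicates whether
+ * the engine may be used.
+ */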
+static int
+qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_SUCCESS;
+	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+	uint64_t dma_base_addr = 0;
+	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+	tmplt_hdr = ha->md_tmplt_hdr;
+	dma_eng_num =
+	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+	/* Read the pex-dma's command-status-and-control register. */
+	rval = qla8044_rd_reg_indirect(vha,
+	    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+	    &cmd_sts_and_cntrl);
+	if (rval)
+		return QLA_FUNCTION_FAILED;
+
+	/* Check if requested pex-dma engine is available. */
+	if (cmd_sts_and_cntrl & BIT_31)
+		return QLA_SUCCESS;
+
+	return QLA_FUNCTION_FAILED;
+}
+
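+/*
+ * Kick one pex-dma transfer: point the engine at the descriptor in
+ * MS memory, write the start command, then poll BIT_1 of the
+ * command-status-and-control register (up to 100 ms) for completion.
+ */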
+static int
+qla8044_start_pex_dma(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_SUCCESS, wait = 0;
+	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+	uint64_t dma_base_addr = 0;
+	struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+	tmplt_hdr = ha->md_tmplt_hdr;
+	dma_eng_num =
+	    tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+	dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+		(dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+	rval = qla8044_wr_reg_indirect(vha,
+	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
+	    m_hdr->desc_card_addr);
+	if (rval)
+		goto error_exit;
+
+	rval = qla8044_wr_reg_indirect(vha,
+	    dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
+	if (rval)
+		goto error_exit;
+
+	rval = qla8044_wr_reg_indirect(vha,
+	    dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
+	    m_hdr->start_dma_cmd);
+	if (rval)
+		goto error_exit;
+
+	/* Wait for dma operation to complete. */
+	for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
+		rval = qla8044_rd_reg_indirect(vha,
+		    (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+		    &cmd_sts_and_cntrl);
+		if (rval)
+			goto error_exit;
+
+		if ((cmd_sts_and_cntrl & BIT_1) == 0)
+			break;
+
+		udelay(10);
+	}
+
+	/* DMA did not finish within the max wait of 100 ms; fail so the
+	 * caller falls back to the rdmem entry read. */
+	if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
+		rval = QLA_FUNCTION_FAILED;
+		goto error_exit;
+	}
+
+error_exit:
+	return rval;
+}
+
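+/*
+ * Fast rdmem path: capture adapter memory with the pex-dma engine in
+ * ISP8044_PEX_DMA_READ_SIZE chunks, bouncing each chunk through a
+ * coherent DMA buffer; on any failure the caller falls back to the
+ * slower MIU test-agent read.
+ */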
+static int
+qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_SUCCESS;
+	struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+	uint32_t chunk_size, read_size;
+	uint8_t *data_ptr = (uint8_t *)*d_ptr;
+	void *rdmem_buffer = NULL;
+	dma_addr_t rdmem_dma;
+	struct qla8044_pex_dma_descriptor dma_desc;
+
+	rval = qla8044_check_dma_engine_state(vha);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb147,
+		    "DMA engine not available. Fallback to rdmem-read.\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	m_hdr = (void *)entry_hdr;
+
+	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+	    ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
+	if (!rdmem_buffer) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb148,
+		    "Unable to allocate rdmem dma buffer\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Prepare pex-dma descriptor to be written to MS memory. */
+	/* dma-desc-cmd layout:
+	 *		0-3: dma-desc-cmd 0-3
+	 *		4-7: pcid function number
+	 *		8-15: dma-desc-cmd 8-15
+	 * dma_bus_addr: dma buffer address
+	 * cmd.read_data_size: amount of data-chunk to be read.
+	 */
+	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+	dma_desc.cmd.dma_desc_cmd |=
+	    ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+
+	dma_desc.dma_bus_addr = rdmem_dma;
+	dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
+	read_size = 0;
+
+	/*
+	 * Perform rdmem operation using pex-dma.
+	 * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
+	 */
+	while (read_size < m_hdr->read_data_size) {
+		if (m_hdr->read_data_size - read_size <
+		    ISP8044_PEX_DMA_READ_SIZE) {
+			chunk_size = (m_hdr->read_data_size - read_size);
+			dma_desc.cmd.read_data_size = chunk_size;
+		}
+
+		dma_desc.src_addr = m_hdr->read_addr + read_size;
+
+		/* Prepare: Write pex-dma descriptor to MS memory. */
+		rval = qla8044_ms_mem_write_128b(vha,
+		    m_hdr->desc_card_addr, (void *)&dma_desc,
+		    (sizeof(struct qla8044_pex_dma_descriptor)/16));
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0xb14a,
+			    "%s: Error writing rdmem-dma-init to MS !!!\n",
+			    __func__);
+			goto error_exit;
+		}
+		ql_dbg(ql_dbg_p3p, vha, 0xb14b,
+		    "%s: Dma-descriptor: Instruct for rdmem dma "
+		    "(chunk_size 0x%x).\n", __func__, chunk_size);
+
+		/* Execute: Start pex-dma operation. */
+		rval = qla8044_start_pex_dma(vha, m_hdr);
+		if (rval)
+			goto error_exit;
+
+		memcpy(data_ptr, rdmem_buffer, chunk_size);
+		data_ptr += chunk_size;
+		read_size += chunk_size;
+	}
+
+	*d_ptr = (void *)data_ptr;
+
+error_exit:
+	if (rdmem_buffer)
+		dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
+		    rdmem_buffer, rdmem_dma);
+
+	return rval;
+}
+
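+/*
+ * Capture DFE data: each iteration arms the block through addr_1
+ * (0x40000000 | value), polls for the mask bit, performs a
+ * read-modify-write through addr_1 + stride, re-arms twice more,
+ * and finally stores the (written value, read data) pair.
+ */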
+static uint32_t
+qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	int loop_cnt;
+	uint32_t addr1, addr2, value, data, temp, wrVal;
+	uint8_t stride, stride2;
+	uint16_t count;
+	uint32_t poll, mask, modify_mask;
+	uint32_t wait_count = 0;
+
+	uint32_t *data_ptr = *d_ptr;
+
+	struct qla8044_minidump_entry_rddfe *rddfe;
+	rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
+
+	addr1 = rddfe->addr_1;
+	value = rddfe->value;
+	stride = rddfe->stride;
+	stride2 = rddfe->stride2;
+	count = rddfe->count;
+
+	poll = rddfe->poll;
+	mask = rddfe->mask;
+	modify_mask = rddfe->modify_mask;
+
+	addr2 = addr1 + stride;
+
+	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+		qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
+
+		wait_count = 0;
+		while (wait_count < poll) {
+			qla8044_rd_reg_indirect(vha, addr1, &temp);
+			if ((temp & mask) != 0)
+				break;
+			wait_count++;
+		}
+
+		if (wait_count == poll) {
+			ql_log(ql_log_warn, vha, 0xb153,
+			    "%s: TIMEOUT\n", __func__);
+			goto error;
+		} else {
+			qla8044_rd_reg_indirect(vha, addr2, &temp);
+			temp = temp & modify_mask;
+			temp = (temp | ((loop_cnt << 16) | loop_cnt));
+			wrVal = ((temp << 16) | temp);
+
+			qla8044_wr_reg_indirect(vha, addr2, wrVal);
+			qla8044_wr_reg_indirect(vha, addr1, value);
+
+			wait_count = 0;
+			while (wait_count < poll) {
+				qla8044_rd_reg_indirect(vha, addr1, &temp);
+				if ((temp & mask) != 0)
+					break;
+				wait_count++;
+			}
+			if (wait_count == poll) {
+				ql_log(ql_log_warn, vha, 0xb154,
+				    "%s: TIMEOUT\n", __func__);
+				goto error;
+			}
+
+			qla8044_wr_reg_indirect(vha, addr1,
+			    ((0x40000000 | value) + stride2));
+			wait_count = 0;
+			while (wait_count < poll) {
+				qla8044_rd_reg_indirect(vha, addr1, &temp);
+				if ((temp & mask) != 0)
+					break;
+				wait_count++;
+			}
+
+			if (wait_count == poll) {
+				ql_log(ql_log_warn, vha, 0xb155,
+				    "%s: TIMEOUT\n", __func__);
+				goto error;
+			}
+
+			qla8044_rd_reg_indirect(vha, addr2, &data);
+
+			*data_ptr++ = wrVal;
+			*data_ptr++ = data;
+		}
+
+	}
+
+	*d_ptr = data_ptr;
+	return QLA_SUCCESS;
+
+error:
+	return -1;
+}
+
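+/*
+ * Capture MDIO registers: wait for the MDIO bus to go idle, issue a
+ * sequence of writes through the ipmdio helpers, read back the data
+ * register, and store one (select, data) pair per iteration.
+ */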
+static uint32_t
+qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
+	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	int ret = 0;
+	uint32_t addr1, addr2, value1, value2, data, selVal;
+	uint8_t stride1, stride2;
+	uint32_t addr3, addr4, addr5, addr6, addr7;
+	uint16_t count, loop_cnt;
+	uint32_t mask;
+	uint32_t *data_ptr = *d_ptr;
+
+	struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+	rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
+
+	addr1 = rdmdio->addr_1;
+	addr2 = rdmdio->addr_2;
+	value1 = rdmdio->value_1;
+	stride1 = rdmdio->stride_1;
+	stride2 = rdmdio->stride_2;
+	count = rdmdio->count;
+
+	mask = rdmdio->mask;
+	value2 = rdmdio->value_2;
+
+	addr3 = addr1 + stride1;
+
+	for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+		    addr3, mask);
+		if (ret == -1)
+			goto error;
+
+		addr4 = addr2 - stride1;
+		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
+		    value2);
+		if (ret == -1)
+			goto error;
+
+		addr5 = addr2 - (2 * stride1);
+		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
+		    value1);
+		if (ret == -1)
+			goto error;
+
+		addr6 = addr2 - (3 * stride1);
+		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
+		    addr6, 0x2);
+		if (ret == -1)
+			goto error;
+
+		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+		    addr3, mask);
+		if (ret == -1)
+			goto error;
+
+		addr7 = addr2 - (4 * stride1);
+		data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
+		if (data == -1)
+			goto error;
+
+		selVal = (value2 << 18) | (value1 << 2) | 2;
+
+		stride2 = rdmdio->stride_2;
+		*data_ptr++ = selVal;
+		*data_ptr++ = data;
+
+		value1 = value1 + stride2;
+		*d_ptr = data_ptr;
+	}
+
+	return 0;
+
+error:
+	return -1;
+}
+
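+/*
+ * Poll-then-write entry: poll addr_1 until the ready bits are set
+ * (the template's poll field doubles as both the iteration budget
+ * and the mask here), write value_2/value_1, and poll once more
+ * (the result of the second poll is not checked). No data is
+ * captured for this entry type.
+ */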
+static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
+		struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+	uint32_t addr1, addr2, value1, value2, poll, r_value;
+	uint32_t wait_count = 0;
+	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+
+	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+	addr1 = pollwr_hdr->addr_1;
+	addr2 = pollwr_hdr->addr_2;
+	value1 = pollwr_hdr->value_1;
+	value2 = pollwr_hdr->value_2;
+
+	poll = pollwr_hdr->poll;
+
+	while (wait_count < poll) {
+		qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+		if ((r_value & poll) != 0)
+			break;
+		wait_count++;
+	}
+
+	if (wait_count == poll) {
+		ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
+		goto error;
+	}
+
+	qla8044_wr_reg_indirect(vha, addr2, value2);
+	qla8044_wr_reg_indirect(vha, addr1, value1);
+
+	wait_count = 0;
+	while (wait_count < poll) {
+		qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+		if ((r_value & poll) != 0)
+			break;
+		wait_count++;
+	}
+
+	return QLA_SUCCESS;
+
+error:
+	return -1;
+}
+
+/**
+ * qla8044_collect_md_data - Retrieve firmware minidump data.
+ * @vha: pointer to host adapter structure
+ */
+int
+qla8044_collect_md_data(struct scsi_qla_host *vha)
+{
+	int num_entry_hdr = 0;
+	struct qla8044_minidump_entry_hdr *entry_hdr;
+	struct qla8044_minidump_template_hdr *tmplt_hdr;
+	uint32_t *data_ptr;
+	uint32_t data_collected = 0, f_capture_mask;
+	int i, rval = QLA_FUNCTION_FAILED;
+	uint64_t now;
+	uint32_t timestamp, idc_control;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->md_dump) {
+		ql_log(ql_log_info, vha, 0xb101,
+		    "%s(%ld) No buffer to dump\n",
+		    __func__, vha->host_no);
+		return rval;
+	}
+
+	if (ha->fw_dumped) {
+		ql_log(ql_log_warn, vha, 0xb10d,
+		    "Firmware has been previously dumped (%p) "
+		    "-- ignoring request.\n", ha->fw_dump);
+		goto md_failed;
+	}
+
+	ha->fw_dumped = 0;
+
+	if (!ha->md_tmplt_hdr || !ha->md_dump) {
+		ql_log(ql_log_warn, vha, 0xb10e,
+		    "Memory not allocated for minidump capture\n");
+		goto md_failed;
+	}
+
+	qla8044_idc_lock(ha);
+	idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	if (idc_control & GRACEFUL_RESET_BIT1) {
+		ql_log(ql_log_warn, vha, 0xb112,
+		    "Forced reset from application, "
+		    "ignore minidump capture\n");
+		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+		    (idc_control & ~GRACEFUL_RESET_BIT1));
+		qla8044_idc_unlock(ha);
+
+		goto md_failed;
+	}
+	qla8044_idc_unlock(ha);
+
+	if (qla82xx_validate_template_chksum(vha)) {
+		ql_log(ql_log_info, vha, 0xb109,
+		    "Template checksum validation error\n");
+		goto md_failed;
+	}
+
+	tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+		ha->md_tmplt_hdr;
+	data_ptr = (uint32_t *)ha->md_dump;
+	num_entry_hdr = tmplt_hdr->num_of_entries;
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb11a,
+	    "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+	f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+	/* Validate whether required debug level is set */
+	if ((f_capture_mask & 0x3) != 0x3) {
+		ql_log(ql_log_warn, vha, 0xb10f,
+		    "Minimum required capture mask[0x%x] level not set\n",
+		    f_capture_mask);
+	}
+	tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+	ql_log(ql_log_info, vha, 0xb102,
+	    "[%s]: starting data ptr: %p\n",
+	    __func__, data_ptr);
+	ql_log(ql_log_info, vha, 0xb10b,
+	    "[%s]: no of entry headers in Template: 0x%x\n",
+	    __func__, num_entry_hdr);
+	ql_log(ql_log_info, vha, 0xb10c,
+	    "[%s]: Total data size: 0x%x (%d bytes)\n",
+	    __func__, ha->md_dump_size, ha->md_dump_size);
+
+	/* Update current timestamp before taking dump */
+	now = get_jiffies_64();
+	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+	tmplt_hdr->driver_timestamp = timestamp;
+
+	entry_hdr = (struct qla8044_minidump_entry_hdr *)
+		(((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+	tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
+	    tmplt_hdr->ocm_window_reg[ha->portnum];
+
+	/* Walk through the entry headers - validate/perform required action */
+	for (i = 0; i < num_entry_hdr; i++) {
+		if (data_collected > ha->md_dump_size) {
+			ql_log(ql_log_info, vha, 0xb103,
+			    "Data collected: [0x%x], "
+			    "Total Dump size: [0x%x]\n",
+			    data_collected, ha->md_dump_size);
+			return rval;
+		}
+
+		if (!(entry_hdr->d_ctrl.entry_capture_mask &
+		      ql2xmdcapmask)) {
+			entry_hdr->d_ctrl.driver_flags |=
+			    QLA82XX_DBG_SKIPPED_FLAG;
+			goto skip_nxt_entry;
+		}
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb104,
+		    "Data collected: [0x%x], Dump size left:[0x%x]\n",
+		    data_collected,
+		    (ha->md_dump_size - data_collected));
+
+		/* Decode the entry type and take required action to capture
+		 * debug data
+		 */
+		switch (entry_hdr->entry_type) {
+		case QLA82XX_RDEND:
+			qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA82XX_CNTRL:
+			rval = qla8044_minidump_process_control(vha,
+			    entry_hdr);
+			if (rval != QLA_SUCCESS) {
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA82XX_RDCRB:
+			qla8044_minidump_process_rdcrb(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDMEM:
+			rval = qla8044_minidump_pex_dma_read(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				rval = qla8044_minidump_process_rdmem(vha,
+				    entry_hdr, &data_ptr);
+				if (rval != QLA_SUCCESS) {
+					qla8044_mark_entry_skipped(vha,
+					    entry_hdr, i);
+					goto md_failed;
+				}
+			}
+			break;
+		case QLA82XX_BOARD:
+		case QLA82XX_RDROM:
+			rval = qla8044_minidump_process_rdrom(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla8044_mark_entry_skipped(vha,
+				    entry_hdr, i);
+			}
+			break;
+		case QLA82XX_L2DTG:
+		case QLA82XX_L2ITG:
+		case QLA82XX_L2DAT:
+		case QLA82XX_L2INS:
+			rval = qla8044_minidump_process_l2tag(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS) {
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+				goto md_failed;
+			}
+			break;
+		case QLA8044_L1DTG:
+		case QLA8044_L1ITG:
+		case QLA82XX_L1DAT:
+		case QLA82XX_L1INS:
+			qla8044_minidump_process_l1cache(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDOCM:
+			qla8044_minidump_process_rdocm(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_RDMUX:
+			qla8044_minidump_process_rdmux(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA82XX_QUEUE:
+			qla8044_minidump_process_queue(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA8044_POLLRD:
+			rval = qla8044_minidump_process_pollrd(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA8044_RDMUX2:
+			qla8044_minidump_process_rdmux2(vha,
+			    entry_hdr, &data_ptr);
+			break;
+		case QLA8044_POLLRDMWR:
+			rval = qla8044_minidump_process_pollrdmwr(vha,
+			    entry_hdr, &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA8044_RDDFE:
+			rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
+			    &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA8044_RDMDIO:
+			rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
+			    &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA8044_POLLWR:
+			rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
+			    &data_ptr);
+			if (rval != QLA_SUCCESS)
+				qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		case QLA82XX_RDNOP:
+		default:
+			qla8044_mark_entry_skipped(vha, entry_hdr, i);
+			break;
+		}
+
+		data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->md_dump;
+skip_nxt_entry:
+		/*
+		 * next entry in the template
+		 */
+		entry_hdr = (struct qla8044_minidump_entry_hdr *)
+		    (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+	}
+
+	if (data_collected != ha->md_dump_size) {
+		ql_log(ql_log_info, vha, 0xb105,
+		    "Dump data mismatch: Data collected: "
+		    "[0x%x], total_data_size:[0x%x]\n",
+		    data_collected, ha->md_dump_size);
+		rval = QLA_FUNCTION_FAILED;
+		goto md_failed;
+	}
+
+	ql_log(ql_log_info, vha, 0xb110,
+	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+	ha->fw_dumped = 1;
+	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+	ql_log(ql_log_info, vha, 0xb106,
+	    "Leaving fn: %s Last entry: 0x%x\n",
+	    __func__, i);
+md_failed:
+	return rval;
+}
+
+void
+qla8044_get_minidump(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!qla8044_collect_md_data(vha)) {
+		ha->fw_dumped = 1;
+		ha->prev_minidump_failed = 0;
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb0db,
+		    "%s: Unable to collect minidump\n",
+		    __func__);
+		ha->prev_minidump_failed = 1;
+	}
+}
+
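+/*
+ * Poll FLASH_STATUS until the READY bits are set, sleeping between
+ * reads, for at most QLA8044_FLASH_READ_RETRY_COUNT attempts.
+ */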
+static int
+qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
+{
+	uint32_t flash_status;
+	int retries = QLA8044_FLASH_READ_RETRY_COUNT;
+	int ret_val = QLA_SUCCESS;
+
+	while (retries--) {
+		ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
+		    &flash_status);
+		if (ret_val) {
+			ql_log(ql_log_warn, vha, 0xb13c,
+			    "%s: Failed to read FLASH_STATUS reg.\n",
+			    __func__);
+			break;
+		}
+		if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
+		    QLA8044_FLASH_STATUS_READY)
+			break;
+		msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
+	}
+
+	if (!retries)
+		ret_val = QLA_FUNCTION_FAILED;
+
+	return ret_val;
+}
+
+static int
+qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
+			       uint32_t data)
+{
+	int ret_val = QLA_SUCCESS;
+	uint32_t cmd;
+
+	cmd = vha->hw->fdt_wrt_sts_reg_cmd;
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb125,
+		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
+		goto exit_func;
+	}
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb126,
+		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+		goto exit_func;
+	}
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_SECOND_ERASE_MS_VAL);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb127,
+		    "%s: Failed to write to FLASH_CONTROL.\n", __func__);
+		goto exit_func;
+	}
+
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val)
+		ql_log(ql_log_warn, vha, 0xb128,
+		    "%s: Error polling flash status reg.\n", __func__);
+
+exit_func:
+	return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_unprotect_flash(scsi_qla_host_t *vha)
+{
+	int ret_val;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
+	if (ret_val)
+		ql_log(ql_log_warn, vha, 0xb139,
+		    "%s: Write flash status failed.\n", __func__);
+
+	return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_protect_flash(scsi_qla_host_t *vha)
+{
+	int ret_val;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
+	if (ret_val)
+		ql_log(ql_log_warn, vha, 0xb13b,
+		    "%s: Write flash status failed.\n", __func__);
+
+	return ret_val;
+}
+
+
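+/*
+ * Erase one flash sector: the 24-bit sector address is byte-swapped
+ * into FLASH_WRDATA, the FDT erase command is issued through
+ * FLASH_ADDR and FLASH_CONTROL, and the status register is polled
+ * for completion.
+ */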
+static int
+qla8044_erase_flash_sector(struct scsi_qla_host *vha,
+			   uint32_t sector_start_addr)
+{
+	uint32_t reversed_addr;
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb12e,
+		    "%s: Poll flash status after erase failed..\n", __func__);
+	}
+
+	reversed_addr = (((sector_start_addr & 0xFF) << 16) |
+	    (sector_start_addr & 0xFF00) |
+	    ((sector_start_addr & 0xFF0000) >> 16));
+
+	ret_val = qla8044_wr_reg_indirect(vha,
+	    QLA8044_FLASH_WRDATA, reversed_addr);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb12f,
+		    "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	   QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb130,
+		    "%s: Failed to write to FLASH_ADDR.\n", __func__);
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_LAST_ERASE_MS_VAL);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb131,
+		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
+	}
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb132,
+		    "%s: Poll flash status failed.\n", __func__);
+	}
+
+	return ret_val;
+}
+
+/**
+ * qla8044_flash_write_u32 - Write one dword of data to flash
+ * @vha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @p_data: Data to be written
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ *
+ * NOTE: Lock should be held on entry
+ */
+static int
+qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
+			uint32_t *p_data)
+{
+	int ret_val = QLA_SUCCESS;
+
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    0x00800000 | (addr >> 2));
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb134,
+		    "%s: Failed write to FLASH_ADDR.\n", __func__);
+		goto exit_func;
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb135,
+		    "%s: Failed write to FLASH_WRDATA.\n", __func__);
+		goto exit_func;
+	}
+	ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb136,
+		    "%s: Failed write to FLASH_CONTROL.\n", __func__);
+		goto exit_func;
+	}
+	ret_val = qla8044_poll_flash_status_reg(vha);
+	if (ret_val) {
+		ql_log(ql_log_warn, vha, 0xb137,
+		    "%s: Poll flash status failed.\n", __func__);
+	}
+
+exit_func:
+	return ret_val;
+}
+
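+/*
+ * Buffer-mode flash write: the first dword is written with the
+ * FIRST_MS pattern, dwords 2..N-1 with the SECOND_MS pattern, and
+ * the last dword (together with the flash address) with the LAST_MS
+ * pattern; the SPI status register is then checked for a write error.
+ */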
+static int
+qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+				uint32_t faddr, uint32_t dwords)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	uint32_t spi_val;
+
+	if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
+	    dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
+		ql_dbg(ql_dbg_user, vha, 0xb123,
+		    "Got unsupported dwords = 0x%x.\n",
+		    dwords);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+	    spi_val | QLA8044_FLASH_SPI_CTL);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_FIRST_TEMP_VAL);
+
+	/* First DWORD write to FLASH_WRDATA */
+	ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
+	    *dwptr++);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_FIRST_MS_PATTERN);
+
+	ret = qla8044_poll_flash_status_reg(vha);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xb124,
+		    "%s: Failed.\n", __func__);
+		goto exit_func;
+	}
+
+	dwords--;
+
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_SECOND_TEMP_VAL);
+
+	/* Second to N-1 DWORDS writes */
+	while (dwords != 1) {
+		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+		    QLA8044_FLASH_SECOND_MS_PATTERN);
+		ret = qla8044_poll_flash_status_reg(vha);
+		if (ret) {
+			ql_log(ql_log_warn, vha, 0xb129,
+			    "%s: Failed.\n", __func__);
+			goto exit_func;
+		}
+		dwords--;
+	}
+
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+	    QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
+
+	/* Last DWORD write */
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+	qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+	    QLA8044_FLASH_LAST_MS_PATTERN);
+	ret = qla8044_poll_flash_status_reg(vha);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xb12a,
+		    "%s: Failed.\n", __func__);
+		goto exit_func;
+	}
+	qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
+
+	if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
+		ql_log(ql_log_warn, vha, 0xb12b,
+		    "%s: Failed.\n", __func__);
+		spi_val = 0;
+		/* Operation failed, clear error bit. */
+		qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+		    &spi_val);
+		qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+		    spi_val | QLA8044_FLASH_SPI_CTL);
+	}
+exit_func:
+	return ret;
+}
+
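+/* Slow-path flash write: program one dword at a time. */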
+static int
+qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+			       uint32_t faddr, uint32_t dwords)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	uint32_t liter;
+
+	for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+		ret = qla8044_flash_write_u32(vha, faddr, dwptr);
+		if (ret) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb141,
+			    "%s: flash address=%x data=%x.\n", __func__,
+			     faddr, *dwptr);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int
+qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+			  uint32_t offset, uint32_t length)
+{
+	int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
+	int dword_count, erase_sec_count;
+	uint32_t erase_offset;
+	uint8_t *p_cache, *p_src;
+
+	erase_offset = offset;
+
+	p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
+	if (!p_cache)
+		return QLA_FUNCTION_FAILED;
+
+	memcpy(p_cache, buf, length);
+	p_src = p_cache;
+	dword_count = length / sizeof(uint32_t);
+	/* Since the offset and length are sector aligned, it will always be
+	 * a multiple of burst_iter_count (64).
+	 */
+	burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
+	erase_sec_count = length / QLA8044_SECTOR_SIZE;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	/* Lock and enable write for whole operation. */
+	qla8044_flash_lock(vha);
+	qla8044_unprotect_flash(vha);
+
+	/* Erasing the sectors */
+	for (i = 0; i < erase_sec_count; i++) {
+		rval = qla8044_erase_flash_sector(vha, erase_offset);
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0xb121,
+			    "Failed to erase the sector having address: "
+			    "0x%x.\n", erase_offset);
+			goto out;
+		}
+		ql_dbg(ql_dbg_user, vha, 0xb138,
+		    "Done erase of sector=0x%x.\n",
+		    erase_offset);
+		erase_offset += QLA8044_SECTOR_SIZE;
+	}
+	ql_dbg(ql_dbg_user, vha, 0xb13f,
+	    "Got write for addr = 0x%x length=0x%x.\n",
+	    offset, length);
+
+	for (i = 0; i < burst_iter_count; i++) {
+
+		/* Go with write. */
+		rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
+		    offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
+		if (rval) {
+			/* Buffer mode failed; fall back to dword mode */
+			ql_log(ql_log_warn, vha, 0xb122,
+			    "Failed to write flash in buffer mode, "
+			    "reverting to slow-write.\n");
+			rval = qla8044_write_flash_dword_mode(vha,
+			    (uint32_t *)p_src, offset,
+			    QLA8044_MAX_OPTROM_BURST_DWORDS);
+		}
+		p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+		offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+	}
+	ql_dbg(ql_dbg_user, vha, 0xb133,
+	    "Done writing.\n");
+
+out:
+	qla8044_protect_flash(vha);
+	qla8044_flash_unlock(vha);
+	scsi_unblock_requests(vha->host);
+	kfree(p_cache);
+
+	return rval;
+}
+
+#define LEG_INT_PTR_B31		(1 << 31)
+#define LEG_INT_PTR_B30		(1 << 30)
+#define PF_BITS_MASK		(0xF << 16)
+/**
+ * qla8044_intr_handler() - Process interrupts for the ISP8044
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla8044_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct rsp_que *rsp;
+	struct device_reg_82xx __iomem *reg;
+	int		status = 0;
+	unsigned long	flags;
+	unsigned long	iter;
+	uint32_t	stat;
+	uint16_t	mb[4];
+	uint32_t leg_int_ptr = 0, pf_bit;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0xb143,
+		    "%s(): NULL response queue pointer\n", __func__);
+		return IRQ_NONE;
+	}
+	ha = rsp->hw;
+	vha = pci_get_drvdata(ha->pdev);
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
+	leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+
+	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+	if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb144,
+		    "%s: Legacy Interrupt Bit 31 not set, "
+		    "spurious interrupt!\n", __func__);
+		return IRQ_NONE;
+	}
+
+	pf_bit = ha->portnum << 16;
+	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+	if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
+		ql_dbg(ql_dbg_p3p, vha, 0xb145,
+		    "%s: Incorrect function ID 0x%x in "
+		    "legacy interrupt register, "
+		    "ha->pf_bit = 0x%x\n", __func__,
+		    (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
+		return IRQ_NONE;
+	}
+
+	/* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
+	 * Trigger Control register and poll till bit 30 (LEG_INT_PTR_B30) of
+	 * the Legacy Interrupt Pointer register is 0.
+	 */
+	qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
+	do {
+		leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+		if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
+			break;
+	} while (leg_int_ptr & (LEG_INT_PTR_B30));
+
+	reg = &ha->iobase->isp82;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (iter = 1; iter--; ) {
+
+		if (RD_REG_DWORD(&reg->host_int)) {
+			stat = RD_REG_DWORD(&reg->host_status);
+			if ((stat & HSRX_RISC_INT) == 0)
+				break;
+
+			switch (stat & 0xff) {
+			case 0x1:
+			case 0x2:
+			case 0x10:
+			case 0x11:
+				qla82xx_mbx_completion(vha, MSW(stat));
+				status |= MBX_INTERRUPT;
+				break;
+			case 0x12:
+				mb[0] = MSW(stat);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+				qla2x00_async_event(vha, rsp, mb);
+				break;
+			case 0x13:
+				qla24xx_process_response_queue(vha, rsp);
+				break;
+			default:
+				ql_dbg(ql_dbg_p3p, vha, 0xb146,
+				    "Unrecognized interrupt type "
+				    "(%d).\n", stat & 0xff);
+				break;
+			}
+		}
+		WRT_REG_DWORD(&reg->host_int, 0);
+	}
+
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int
+qla8044_idc_dontreset(struct qla_hw_data *ha)
+{
+	uint32_t idc_ctrl;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	return idc_ctrl & DONTRESET_BIT0;
+}
+
+static void
+qla8044_clear_rst_ready(scsi_qla_host_t *vha)
+{
+	uint32_t drv_state;
+
+	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	/*
+	 * For ISP8044, the drv_state register has 1 bit per function:
+	 * shift 1 by func_num to clear the bit for this function.
+	 * (For ISP82xx, drv_state has 4 bits per function.)
+	 */
+	drv_state &= ~(1 << vha->hw->portnum);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb13d,
+	    "drv_state: 0x%08x\n", drv_state);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+int
+qla8044_abort_isp(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint32_t dev_state;
+	struct qla_hw_data *ha = vha->hw;
+
+	qla8044_idc_lock(ha);
+	dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+	if (ql2xdontresethba)
+		qla8044_set_idc_dontreset(vha);
+
+	/* If device_state is NEED_RESET, go ahead with the reset,
+	 * irrespective of ql2xdontresethba. This allows a
+	 * non-reset-owner to force a reset: the non-reset-owner sets
+	 * IDC_CTRL BIT0 to prevent the reset-owner from doing a reset
+	 * and then forces a reset by setting device_state to
+	 * NEED_RESET. */
+	if (dev_state == QLA8XXX_DEV_READY) {
+		/* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
+		 * recovery */
+		if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
+			ql_dbg(ql_dbg_p3p, vha, 0xb13e,
+			    "Reset recovery disabled\n");
+			rval = QLA_FUNCTION_FAILED;
+			goto exit_isp_reset;
+		}
+
+		ql_dbg(ql_dbg_p3p, vha, 0xb140,
+		    "HW State: NEED RESET\n");
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_NEED_RESET);
+	}
+
+	/* For ISP8044, the reset owner is NIC, iSCSI or FCoE based on priority
+	 * and which drivers are present. Unlike ISP82XX, the function setting
+	 * NEED_RESET may not be the reset owner. */
+	qla83xx_reset_ownership(vha);
+
+	qla8044_idc_unlock(ha);
+	rval = qla8044_device_state_handler(vha);
+	qla8044_idc_lock(ha);
+	qla8044_clear_rst_ready(vha);
+
+exit_isp_reset:
+	qla8044_idc_unlock(ha);
+	if (rval == QLA_SUCCESS) {
+		ha->flags.isp82xx_fw_hung = 0;
+		ha->flags.nic_core_reset_hdlr_active = 0;
+		rval = qla82xx_restart_isp(vha);
+	}
+
+	return rval;
+}
+
+void
+qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->allow_cna_fw_dump)
+		return;
+
+	scsi_block_requests(vha->host);
+	ha->flags.isp82xx_no_md_cap = 1;
+	qla8044_idc_lock(ha);
+	qla82xx_set_reset_owner(vha);
+	qla8044_idc_unlock(ha);
+	qla2x00_wait_for_chip_reset(vha);
+	scsi_unblock_requests(vha->host);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx2.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx2.h
new file mode 100644
index 0000000..83c1b7e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_nx2.h
@@ -0,0 +1,584 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_NX2_H
+#define __QLA_NX2_H
+
+#define QSNT_ACK_TOV				30
+#define INTENT_TO_RECOVER			0x01
+#define PROCEED_TO_RECOVER			0x02
+#define IDC_LOCK_RECOVERY_OWNER_MASK		0x3C
+#define IDC_LOCK_RECOVERY_STATE_MASK		0x3
+#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS	2
+
+#define QLA8044_DRV_LOCK_MSLEEP		200
+#define QLA8044_ADDR_DDR_NET		(0x0000000000000000ULL)
+#define QLA8044_ADDR_DDR_NET_MAX	(0x000000000fffffffULL)
+
+#define MD_MIU_TEST_AGT_WRDATA_LO		0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI		0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO		0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI		0x410000B4
+#define MD_MIU_TEST_AGT_RDDATA_LO		0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI		0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO		0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI		0x410000BC
+
+/* MIU_TEST_AGT_CTRL flags. These work for the SIU as well. */
+#define MIU_TA_CTL_WRITE_ENABLE	(MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START	(MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |	\
+				 MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE	(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/* IMBUS address bit used to indicate a host address. This bit is
+ * eliminated by the PCIe BAR and BAR select before presentation
+ * over PCIe. */
+/* host memory via IMBUS */
+#define QLA8044_P2_ADDR_PCIE	(0x0000000800000000ULL)
+#define QLA8044_P3_ADDR_PCIE	(0x0000008000000000ULL)
+#define QLA8044_ADDR_PCIE_MAX	(0x0000000FFFFFFFFFULL)
+#define QLA8044_ADDR_OCM0	(0x0000000200000000ULL)
+#define QLA8044_ADDR_OCM0_MAX	(0x00000002000fffffULL)
+#define QLA8044_ADDR_OCM1	(0x0000000200400000ULL)
+#define QLA8044_ADDR_OCM1_MAX	(0x00000002004fffffULL)
+#define QLA8044_ADDR_QDR_NET	(0x0000000300000000ULL)
+#define QLA8044_P2_ADDR_QDR_NET_MAX	(0x00000003001fffffULL)
+#define QLA8044_P3_ADDR_QDR_NET_MAX	(0x0000000303ffffffULL)
+#define QLA8044_ADDR_QDR_NET_MAX	(0x0000000307ffffffULL)
+#define QLA8044_PCI_CRBSPACE		((unsigned long)0x06000000)
+#define QLA8044_PCI_DIRECT_CRB		((unsigned long)0x04400000)
+#define QLA8044_PCI_CAMQM		((unsigned long)0x04800000)
+#define QLA8044_PCI_CAMQM_MAX		((unsigned long)0x04ffffff)
+#define QLA8044_PCI_DDR_NET		((unsigned long)0x00000000)
+#define QLA8044_PCI_QDR_NET		((unsigned long)0x04000000)
+#define QLA8044_PCI_QDR_NET_MAX		((unsigned long)0x043fffff)
+
+/*  PCI Windowing for DDR regions.  */
+static inline bool addr_in_range(u64 addr, u64 low, u64 high)
+{
+	return addr <= high && addr >= low;
+}
+
+/* Indirectly Mapped Registers */
+#define QLA8044_FLASH_SPI_STATUS	0x2808E010
+#define QLA8044_FLASH_SPI_CONTROL	0x2808E014
+#define QLA8044_FLASH_STATUS		0x42100004
+#define QLA8044_FLASH_CONTROL		0x42110004
+#define QLA8044_FLASH_ADDR		0x42110008
+#define QLA8044_FLASH_WRDATA		0x4211000C
+#define QLA8044_FLASH_RDDATA		0x42110018
+#define QLA8044_FLASH_DIRECT_WINDOW	0x42110030
+#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+
+/* Flash access regs */
+#define QLA8044_FLASH_LOCK		0x3850
+#define QLA8044_FLASH_UNLOCK		0x3854
+#define QLA8044_FLASH_LOCK_ID		0x3500
+
+/* Driver Lock regs */
+#define QLA8044_DRV_LOCK		0x3868
+#define QLA8044_DRV_UNLOCK		0x386C
+#define QLA8044_DRV_LOCK_ID		0x3504
+#define QLA8044_DRV_LOCKRECOVERY	0x379C
+
+/* IDC version */
+#define QLA8044_IDC_VER_MAJ_VALUE       0x1
+#define QLA8044_IDC_VER_MIN_VALUE       0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA8044_CRB_IDC_VER_MAJOR	0x3780
+#define QLA8044_CRB_IDC_VER_MINOR	0x3798
+#define QLA8044_IDC_DRV_AUDIT		0x3794
+#define QLA8044_SRE_SHIM_CONTROL	0x0D200284
+#define QLA8044_PORT0_RXB_PAUSE_THRS	0x0B2003A4
+#define QLA8044_PORT1_RXB_PAUSE_THRS	0x0B2013A4
+#define QLA8044_PORT0_RXB_TC_MAX_CELL	0x0B200388
+#define QLA8044_PORT1_RXB_TC_MAX_CELL	0x0B201388
+#define QLA8044_PORT0_RXB_TC_STATS	0x0B20039C
+#define QLA8044_PORT1_RXB_TC_STATS	0x0B20139C
+#define QLA8044_PORT2_IFB_PAUSE_THRS	0x0B200704
+#define QLA8044_PORT3_IFB_PAUSE_THRS	0x0B201704
+
+/* set value to pause threshold value */
+#define QLA8044_SET_PAUSE_VAL		0x0
+#define QLA8044_SET_TC_MAX_CELL_VAL	0x03FF03FF
+#define QLA8044_PEG_HALT_STATUS1	0x34A8
+#define QLA8044_PEG_HALT_STATUS2	0x34AC
+#define QLA8044_PEG_ALIVE_COUNTER	0x34B0 /* FW_HEARTBEAT */
+#define QLA8044_FW_CAPABILITIES		0x3528
+#define QLA8044_CRB_DRV_ACTIVE		0x3788 /* IDC_DRV_PRESENCE */
+#define QLA8044_CRB_DEV_STATE		0x3784 /* IDC_DEV_STATE */
+#define QLA8044_CRB_DRV_STATE		0x378C /* IDC_DRV_ACK */
+#define QLA8044_CRB_DRV_SCRATCH		0x3548
+#define QLA8044_CRB_DEV_PART_INFO1	0x37E0
+#define QLA8044_CRB_DEV_PART_INFO2	0x37E4
+#define QLA8044_FW_VER_MAJOR		0x3550
+#define QLA8044_FW_VER_MINOR		0x3554
+#define QLA8044_FW_VER_SUB		0x3558
+#define QLA8044_NPAR_STATE		0x359C
+#define QLA8044_FW_IMAGE_VALID		0x35FC
+#define QLA8044_CMDPEG_STATE		0x3650
+#define QLA8044_ASIC_TEMP		0x37B4
+#define QLA8044_FW_API			0x356C
+#define QLA8044_DRV_OP_MODE		0x3570
+#define QLA8044_CRB_WIN_BASE		0x3800
+#define QLA8044_CRB_WIN_FUNC(f)		(QLA8044_CRB_WIN_BASE+((f)*4))
+#define QLA8044_SEM_LOCK_BASE		0x3840
+#define QLA8044_SEM_UNLOCK_BASE		0x3844
+#define QLA8044_SEM_LOCK_FUNC(f)	(QLA8044_SEM_LOCK_BASE+((f)*8))
+#define QLA8044_SEM_UNLOCK_FUNC(f)	(QLA8044_SEM_UNLOCK_BASE+((f)*8))
+#define QLA8044_LINK_STATE(f)		(0x3698+((f) > 7 ? 4 : 0))
+#define QLA8044_LINK_SPEED(f)		(0x36E0+(((f) >> 2) * 4))
+#define QLA8044_MAX_LINK_SPEED(f)       (0x36F0+(((f) / 4) * 4))
+#define QLA8044_LINK_SPEED_FACTOR	10
+#define QLA8044_FUN7_ACTIVE_INDEX	0x80
+
+/* FLASH API Defines */
+#define QLA8044_FLASH_MAX_WAIT_USEC	100
+#define QLA8044_FLASH_LOCK_TIMEOUT	10000
+#define QLA8044_FLASH_SECTOR_SIZE	65536
+#define QLA8044_DRV_LOCK_TIMEOUT	2000
+#define QLA8044_FLASH_SECTOR_ERASE_CMD	0xdeadbeef
+#define QLA8044_FLASH_WRITE_CMD		0xdacdacda
+#define QLA8044_FLASH_BUFFER_WRITE_CMD	0xcadcadca
+#define QLA8044_FLASH_READ_RETRY_COUNT	2000
+#define QLA8044_FLASH_STATUS_READY	0x6
+#define QLA8044_FLASH_BUFFER_WRITE_MIN	2
+#define QLA8044_FLASH_BUFFER_WRITE_MAX	64
+#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA8044_ERASE_MODE		1
+#define QLA8044_WRITE_MODE		2
+#define QLA8044_DWORD_WRITE_MODE	3
+#define QLA8044_GLOBAL_RESET		0x38CC
+#define QLA8044_WILDCARD		0x38F0
+#define QLA8044_INFORMANT		0x38FC
+#define QLA8044_HOST_MBX_CTRL		0x3038
+#define QLA8044_FW_MBX_CTRL		0x303C
+#define QLA8044_BOOTLOADER_ADDR		0x355C
+#define QLA8044_BOOTLOADER_SIZE		0x3560
+#define QLA8044_FW_IMAGE_ADDR		0x3564
+#define QLA8044_MBX_INTR_ENABLE		0x1000
+#define QLA8044_MBX_INTR_MASK		0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0		0x1
+#define GRACEFUL_RESET_BIT1	0x2
+
+/* ISP8044 PEG_HALT_STATUS1 bits */
+#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA8044_HALT_STATUS_FW_RESET	  (0x2 << 29)
+#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA8044_BOOTLOADER_FLASH_ADDR	0x10000
+#define QLA8044_BOOT_FROM_FLASH		0
+#define QLA8044_IDC_PARAM_ADDR		0x3e8020
+
+/* FLASH related definitions */
+#define QLA8044_OPTROM_BURST_SIZE		0x100
+#define QLA8044_MAX_OPTROM_BURST_DWORDS		(QLA8044_OPTROM_BURST_SIZE / 4)
+#define QLA8044_MIN_OPTROM_BURST_DWORDS		2
+#define QLA8044_SECTOR_SIZE			(64 * 1024)
+
+#define QLA8044_FLASH_SPI_CTL			0x4
+#define QLA8044_FLASH_FIRST_TEMP_VAL		0x00800000
+#define QLA8044_FLASH_SECOND_TEMP_VAL		0x00800001
+#define QLA8044_FLASH_FIRST_MS_PATTERN		0x43
+#define QLA8044_FLASH_SECOND_MS_PATTERN		0x7F
+#define QLA8044_FLASH_LAST_MS_PATTERN		0x7D
+#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG	0xFD0100
+#define QLA8044_FLASH_SECOND_ERASE_MS_VAL	0x5
+#define QLA8044_FLASH_ERASE_SIG			0xFD0300
+#define QLA8044_FLASH_LAST_ERASE_MS_VAL		0x3D
+
+/* Reset template definitions */
+#define QLA8044_MAX_RESET_SEQ_ENTRIES	16
+#define QLA8044_RESTART_TEMPLATE_SIZE	0x2000
+#define QLA8044_RESET_TEMPLATE_ADDR	0x4F0000
+#define QLA8044_RESET_SEQ_VERSION	0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP			0x0000
+#define OPCODE_WRITE_LIST		0x0001
+#define OPCODE_READ_WRITE_LIST		0x0002
+#define OPCODE_POLL_LIST		0x0004
+#define OPCODE_POLL_WRITE_LIST		0x0008
+#define OPCODE_READ_MODIFY_WRITE	0x0010
+#define OPCODE_SEQ_PAUSE		0x0020
+#define OPCODE_SEQ_END			0x0040
+#define OPCODE_TMPL_END			0x0080
+#define OPCODE_POLL_READ_LIST		0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE	0xCAFE
+#define QLA8044_IDC_DRV_CTRL            0x3790
+#define AF_8044_NO_FW_DUMP              27 /* 0x08000000 */
+
+#define MINIDUMP_SIZE_36K		36864
+
+struct qla8044_reset_template_hdr {
+	uint16_t	version;
+	uint16_t	signature;
+	uint16_t	size;
+	uint16_t	entries;
+	uint16_t	hdr_size;
+	uint16_t	checksum;
+	uint16_t	init_seq_offset;
+	uint16_t	start_seq_offset;
+} __packed;
+
+/* Common Entry Header. */
+struct qla8044_reset_entry_hdr {
+	uint16_t cmd;
+	uint16_t size;
+	uint16_t count;
+	uint16_t delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla8044_poll {
+	uint32_t  test_mask;
+	uint32_t  test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla8044_rmw {
+	uint32_t test_mask;
+	uint32_t xor_value;
+	uint32_t  or_value;
+	uint8_t shl;
+	uint8_t shr;
+	uint8_t index_a;
+	uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla8044_entry {
+	uint32_t arg1;
+	uint32_t arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla8044_quad_entry {
+	uint32_t dr_addr;
+	uint32_t dr_value;
+	uint32_t ar_addr;
+	uint32_t ar_value;
+} __packed;
+
+struct qla8044_reset_template {
+	int seq_index;
+	int seq_error;
+	int array_index;
+	uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES];
+	uint8_t *buff;
+	uint8_t *stop_offset;
+	uint8_t *start_offset;
+	uint8_t *init_offset;
+	struct qla8044_reset_template_hdr *hdr;
+	uint8_t seq_end;
+	uint8_t template_end;
+};
+
+/* driver_code is for the driver to write some info about the entry;
+ * it is currently not used.
+ */
+struct qla8044_minidump_entry_hdr {
+	uint32_t entry_type;
+	uint32_t entry_size;
+	uint32_t entry_capture_size;
+	struct {
+		uint8_t entry_capture_mask;
+		uint8_t entry_code;
+		uint8_t driver_code;
+		uint8_t driver_flags;
+	} d_ctrl;
+} __packed;
+
+/*  Read CRB entry header */
+struct qla8044_minidump_entry_crb {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t addr;
+	struct {
+		uint8_t addr_stride;
+		uint8_t state_index_a;
+		uint16_t poll_timeout;
+	} crb_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+
+	struct {
+		uint8_t opcode;
+		uint8_t state_index_v;
+		uint8_t shl;
+		uint8_t shr;
+	} crb_ctrl;
+
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t value_3;
+} __packed;
+
+struct qla8044_minidump_entry_cache {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t tag_reg_addr;
+	struct {
+		uint16_t tag_value_stride;
+		uint16_t init_tag_value;
+	} addr_ctrl;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t control_addr;
+	struct {
+		uint16_t write_value;
+		uint8_t poll_mask;
+		uint8_t poll_wait;
+	} cache_ctrl;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_1;
+	} read_ctrl;
+} __packed;
+
+/* Read OCM */
+struct qla8044_minidump_entry_rdocm {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t rsvd_0;
+	uint32_t rsvd_1;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_2;
+	uint32_t rsvd_3;
+	uint32_t read_addr;
+	uint32_t read_addr_stride;
+} __packed;
+
+/* Read Memory */
+struct qla8044_minidump_entry_rdmem {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+};
+
+/* Read Memory: For Pex-DMA */
+struct qla8044_minidump_entry_rdmem_pex_dma {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t desc_card_addr;
+	uint16_t dma_desc_cmd;
+	uint8_t rsvd[2];
+	uint32_t start_dma_cmd;
+	uint8_t rsvd2[12];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+} __packed;
+
+/* Read ROM */
+struct qla8044_minidump_entry_rdrom {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t rsvd[6];
+	uint32_t read_addr;
+	uint32_t read_data_size;
+} __packed;
+
+/* Mux entry */
+struct qla8044_minidump_entry_mux {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t rsvd_0;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t select_value;
+	uint32_t select_value_stride;
+	uint32_t read_addr;
+	uint32_t rsvd_1;
+} __packed;
+
+/* Queue entry */
+struct qla8044_minidump_entry_queue {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr;
+	struct {
+		uint16_t queue_id_stride;
+		uint16_t rsvd_0;
+	} q_strd;
+	uint32_t data_size;
+	uint32_t op_count;
+	uint32_t rsvd_1;
+	uint32_t rsvd_2;
+	uint32_t read_addr;
+	struct {
+		uint8_t read_addr_stride;
+		uint8_t read_addr_cnt;
+		uint16_t rsvd_3;
+	} rd_strd;
+} __packed;
+
+/* POLLRD Entry */
+struct qla8044_minidump_entry_pollrd {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr;
+	uint32_t read_addr;
+	uint32_t select_value;
+	uint16_t select_value_stride;
+	uint16_t op_count;
+	uint32_t poll_wait;
+	uint32_t poll_mask;
+	uint32_t data_size;
+	uint32_t rsvd_1;
+} __packed;
+
+struct qla8044_minidump_entry_rddfe {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t value;
+	uint8_t stride;
+	uint8_t stride2;
+	uint16_t count;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t modify_mask;
+	uint32_t data_size;
+	uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+	struct qla8044_minidump_entry_hdr h;
+
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint8_t stride_1;
+	uint8_t stride_2;
+	uint16_t count;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t value_2;
+	uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t poll;
+	uint32_t mask;
+	uint32_t data_size;
+	uint32_t rsvd;
+
+}  __packed;
+
+/* RDMUX2 Entry */
+struct qla8044_minidump_entry_rdmux2 {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t select_addr_1;
+	uint32_t select_addr_2;
+	uint32_t select_value_1;
+	uint32_t select_value_2;
+	uint32_t op_count;
+	uint32_t select_value_mask;
+	uint32_t read_addr;
+	uint8_t select_value_stride;
+	uint8_t data_size;
+	uint8_t rsvd[2];
+} __packed;
+
+/* POLLRDMWR Entry */
+struct qla8044_minidump_entry_pollrdmwr {
+	struct qla8044_minidump_entry_hdr h;
+	uint32_t addr_1;
+	uint32_t addr_2;
+	uint32_t value_1;
+	uint32_t value_2;
+	uint32_t poll_wait;
+	uint32_t poll_mask;
+	uint32_t modify_mask;
+	uint32_t data_size;
+} __packed;
+
+/* IDC additional information */
+struct qla8044_idc_information {
+	uint32_t request_desc;  /* IDC request descriptor */
+	uint32_t info1; /* IDC additional info */
+	uint32_t info2; /* IDC additional info */
+	uint32_t info3; /* IDC additional info */
+} __packed;
+
+enum qla_regs {
+	QLA8044_PEG_HALT_STATUS1_INDEX = 0,
+	QLA8044_PEG_HALT_STATUS2_INDEX,
+	QLA8044_PEG_ALIVE_COUNTER_INDEX,
+	QLA8044_CRB_DRV_ACTIVE_INDEX,
+	QLA8044_CRB_DEV_STATE_INDEX,
+	QLA8044_CRB_DRV_STATE_INDEX,
+	QLA8044_CRB_DRV_SCRATCH_INDEX,
+	QLA8044_CRB_DEV_PART_INFO_INDEX,
+	QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+	QLA8044_FW_VERSION_MAJOR_INDEX,
+	QLA8044_FW_VERSION_MINOR_INDEX,
+	QLA8044_FW_VERSION_SUB_INDEX,
+	QLA8044_CRB_CMDPEG_STATE_INDEX,
+	QLA8044_CRB_TEMP_STATE_INDEX,
+} __packed;
+
+#define CRB_REG_INDEX_MAX	14
+#define CRB_CMDPEG_CHECK_RETRY_COUNT    60
+#define CRB_CMDPEG_CHECK_DELAY          500
+
+/* MiniDump Structures */
+
+#define QLA8044_SS_OCM_WNDREG_INDEX             3
+#define QLA8044_DBG_STATE_ARRAY_LEN             16
+#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN          8
+#define QLA8044_DBG_RSVD_ARRAY_LEN              8
+#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN        16
+#define QLA8044_SS_PCI_INDEX                    0
+#define QLA8044_RDDFE          38
+#define QLA8044_RDMDIO         39
+#define QLA8044_POLLWR         40
+
+struct qla8044_minidump_template_hdr {
+	uint32_t entry_type;
+	uint32_t first_entry_offset;
+	uint32_t size_of_template;
+	uint32_t capture_debug_level;
+	uint32_t num_of_entries;
+	uint32_t version;
+	uint32_t driver_timestamp;
+	uint32_t checksum;
+
+	uint32_t driver_capture_mask;
+	uint32_t driver_info_word2;
+	uint32_t driver_info_word3;
+	uint32_t driver_info_word4;
+
+	uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN];
+	uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN];
+	uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN];
+};
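+
+/*
+ * Editorial sketch (not part of the baseline import): template checksum
+ * validation.  It assumes the common convention that the header checksum
+ * field is chosen so the sum of every 32-bit word in the template, folded
+ * back into 32 bits, ends up zero; the authoritative rule is in qla_nx2.c.
+ */
+static inline int
+qla8044_tmplt_checksum_ok_sketch(const uint32_t *tmplt, uint32_t size_bytes)
+{
+	uint64_t sum = 0;
+	uint32_t i;
+
+	for (i = 0; i < size_bytes / sizeof(uint32_t); i++)
+		sum += tmplt[i];
+	while (sum >> 32)		/* fold carries back into 32 bits */
+		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+	return sum == 0;
+}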
+
+struct qla8044_pex_dma_descriptor {
+	struct {
+		uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+		uint8_t rsvd[2];
+		uint16_t dma_desc_cmd;
+	} cmd;
+	uint64_t src_addr;
+	uint64_t dma_bus_addr; /*0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd*/
+	uint8_t rsvd[24];
+} __packed;
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_os.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_os.c
new file mode 100644
index 0000000..ea60c6e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_os.c
@@ -0,0 +1,6820 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "qla_target.h"
+
+/*
+ * Driver version
+ */
+char qla2x00_version_str[40];
+
+static int apidev_major;
+
+/*
+ * SRB allocation cache
+ */
+struct kmem_cache *srb_cachep;
+
+/*
+ * CT6 CTX allocation cache
+ */
+static struct kmem_cache *ctx_cachep;
+/*
+ * error level for logging
+ */
+int ql_errlev = ql_log_all;
+
+static int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xenableclass2,
+		"Specify if Class 2 operations are supported from the very "
+		"beginning. Default is 0 - class 2 not supported.");
+
+int ql2xlogintimeout = 20;
+module_param(ql2xlogintimeout, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xlogintimeout,
+		"Login timeout value in seconds.");
+
+int qlport_down_retry;
+module_param(qlport_down_retry, int, S_IRUGO);
+MODULE_PARM_DESC(qlport_down_retry,
+		"Maximum number of command retries to a port that returns "
+		"a PORT-DOWN status.");
+
+int ql2xplogiabsentdevice;
+module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xplogiabsentdevice,
+		"Option to enable PLOGI to devices that are not present after "
+		"a Fabric scan.  This is needed for several broken switches. "
+		"Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+
+int ql2xloginretrycount = 0;
+module_param(ql2xloginretrycount, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xloginretrycount,
+		"Specify an alternate value for the NVRAM login retry count.");
+
+int ql2xallocfwdump = 1;
+module_param(ql2xallocfwdump, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xallocfwdump,
+		"Option to enable allocation of memory for a firmware dump "
+		"during HBA initialization.  Memory allocation requirements "
+		"vary by ISP type.  Default is 1 - allocate memory.");
+
+int ql2xextended_error_logging;
+module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
+module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xextended_error_logging,
+		"Option to enable extended error logging,\n"
+		"\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
+		"\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
+		"\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
+		"\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
+		"\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
+		"\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
+		"\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
+		"\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
+		"\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
+		"\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
+		"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+		"\t\t0x1e400000 - Preferred value for capturing essential "
+		"debug information (equivalent to old "
+		"ql2xextended_error_logging=1).\n"
+		"\t\tDo LOGICAL OR of the value to enable more than one level");
+
+int ql2xshiftctondsd = 6;
+module_param(ql2xshiftctondsd, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xshiftctondsd,
+		"Set to control shifting of command type processing "
+		"based on total number of SG elements.");
+
+int ql2xfdmienable = 1;
+module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
+module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xfdmienable,
+		"Enables FDMI registrations. "
+		"0 - no FDMI. Default is 1 - perform FDMI.");
+
+#define MAX_Q_DEPTH	64
+static int ql2xmaxqdepth = MAX_Q_DEPTH;
+module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xmaxqdepth,
+		"Maximum queue depth to set for each LUN. "
+		"Default is 64.");
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xenabledif;
+#else
+int ql2xenabledif = 2;
+#endif
+module_param(ql2xenabledif, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xenabledif,
+		" Enable T10-CRC-DIF:\n"
+		" Default is 2.\n"
+		"  0 -- No DIF Support\n"
+		"  1 -- Enable DIF for all types\n"
+		"  2 -- Enable DIF for all types, except Type 0.\n");
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xnvmeenable = 1;
+#else
+int ql2xnvmeenable;
+#endif
+module_param(ql2xnvmeenable, int, 0644);
+MODULE_PARM_DESC(ql2xnvmeenable,
+    "Enables NVME support. "
+    "0 - no NVMe.  Default is Y");
+
+int ql2xenablehba_err_chk = 2;
+module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xenablehba_err_chk,
+		" Enable T10-CRC-DIF Error isolation by HBA:\n"
+		" Default is 2.\n"
+		"  0 -- Error isolation disabled\n"
+		"  1 -- Error isolation enabled only for DIX Type 0\n"
+		"  2 -- Error isolation enabled for all Types\n");
+
+int ql2xiidmaenable = 1;
+module_param(ql2xiidmaenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xiidmaenable,
+		"Enables iIDMA settings "
+		"Default is 1 - perform iIDMA. 0 - no iIDMA.");
+
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+		"Enable on demand multiple queue pairs support "
+		"Default is 1 for supported. "
+		"Set it to 0 to turn off mq qpair support.");
+
+int ql2xfwloadbin;
+module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
+module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xfwloadbin,
+		"Option to specify location from which to load ISP firmware:.\n"
+		" 2 -- load firmware via the request_firmware() (hotplug).\n"
+		"      interface.\n"
+		" 1 -- load firmware from flash.\n"
+		" 0 -- use default semantics.\n");
+
+int ql2xetsenable;
+module_param(ql2xetsenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xetsenable,
+		"Enables firmware ETS burst."
+		"Default is 0 - skip ETS enablement.");
+
+int ql2xdbwr = 1;
+module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xdbwr,
+		"Option to specify scheme for request queue posting.\n"
+		" 0 -- Regular doorbell.\n"
+		" 1 -- CAMRAM doorbell (faster).\n");
+
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xtargetreset,
+		 "Enable target reset."
+		 "Default is 1 - use hw defaults.");
+
+int ql2xgffidenable;
+module_param(ql2xgffidenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xgffidenable,
+		"Enables GFF_ID checks of port type. "
+		"Default is 0 - Do not use GFF_ID information.");
+
+int ql2xasynctmfenable = 1;
+module_param(ql2xasynctmfenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xasynctmfenable,
+		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
+		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+
+int ql2xdontresethba;
+module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xdontresethba,
+		"Option to specify reset behaviour.\n"
+		" 0 (Default) -- Reset on failure.\n"
+		" 1 -- Do not reset on failure.\n");
+
+uint64_t ql2xmaxlun = MAX_LUNS;
+module_param(ql2xmaxlun, ullong, S_IRUGO);
+MODULE_PARM_DESC(ql2xmaxlun,
+		"Defines the maximum LU number to register with the SCSI "
+		"midlayer. Default is 65535.");
+
+int ql2xmdcapmask = 0x1F;
+module_param(ql2xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdcapmask,
+		"Set the Minidump driver capture mask level. "
+		"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+
+int ql2xmdenable = 1;
+module_param(ql2xmdenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdenable,
+		"Enable/disable MiniDump. "
+		"0 - MiniDump disabled. "
+		"1 (Default) - MiniDump enabled.");
+
+int ql2xexlogins = 0;
+module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xexlogins,
+		 "Number of extended Logins. "
+		 "0 (Default)- Disabled.");
+
+int ql2xexchoffld = 1024;
+module_param(ql2xexchoffld, uint, 0644);
+MODULE_PARM_DESC(ql2xexchoffld,
+	"Number of target exchanges.");
+
+int ql2xiniexchg = 1024;
+module_param(ql2xiniexchg, uint, 0644);
+MODULE_PARM_DESC(ql2xiniexchg,
+	"Number of initiator exchanges.");
+
+int ql2xfwholdabts = 0;
+module_param(ql2xfwholdabts, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xfwholdabts,
+		"Allow FW to hold status IOCB until ABTS rsp received. "
+		"0 (Default) Do not set fw option. "
+		"1 - Set fw option to hold ABTS.");
+
+int ql2xmvasynctoatio = 1;
+module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xmvasynctoatio,
+		"Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
+		"0 (Default). Do not move IOCBs"
+		"1 - Move IOCBs.");
+
+int ql2xautodetectsfp = 1;
+module_param(ql2xautodetectsfp, int, 0444);
+MODULE_PARM_DESC(ql2xautodetectsfp,
+		 "Detect SFP range and set appropriate distance.\n"
+		 "1 (Default): Enable\n");
+
+/*
+ * SCSI host template entry points
+ */
+static int qla2xxx_slave_configure(struct scsi_device *device);
+static int qla2xxx_slave_alloc(struct scsi_device *);
+static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
+static void qla2xxx_scan_start(struct Scsi_Host *);
+static void qla2xxx_slave_destroy(struct scsi_device *);
+static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+static int qla2xxx_eh_abort(struct scsi_cmnd *);
+static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
+static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
+
+static void qla2x00_clear_drv_active(struct qla_hw_data *);
+static void qla2x00_free_device(scsi_qla_host_t *);
+static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
+
+struct scsi_host_template qla2xxx_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= QLA2XXX_DRIVER_NAME,
+	.queuecommand		= qla2xxx_queuecommand,
+
+	.eh_timed_out		= fc_eh_timed_out,
+	.eh_abort_handler	= qla2xxx_eh_abort,
+	.eh_device_reset_handler = qla2xxx_eh_device_reset,
+	.eh_target_reset_handler = qla2xxx_eh_target_reset,
+	.eh_bus_reset_handler	= qla2xxx_eh_bus_reset,
+	.eh_host_reset_handler	= qla2xxx_eh_host_reset,
+
+	.slave_configure	= qla2xxx_slave_configure,
+
+	.slave_alloc		= qla2xxx_slave_alloc,
+	.slave_destroy		= qla2xxx_slave_destroy,
+	.scan_finished		= qla2xxx_scan_finished,
+	.scan_start		= qla2xxx_scan_start,
+	.change_queue_depth	= scsi_change_queue_depth,
+	.map_queues             = qla2xxx_map_queues,
+	.this_id		= -1,
+	.cmd_per_lun		= 3,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= SG_ALL,
+
+	.max_sectors		= 0xFFFF,
+	.shost_attrs		= qla2x00_host_attrs,
+
+	.supported_mode		= MODE_INITIATOR,
+	.track_queue_depth	= 1,
+};
+
+static struct scsi_transport_template *qla2xxx_transport_template = NULL;
+struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
+
+/* TODO Convert to inlines
+ *
+ * Timer routines
+ */
+
+__inline__ void
+qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
+{
+	init_timer(&vha->timer);
+	vha->timer.expires = jiffies + interval * HZ;
+	vha->timer.data = (unsigned long)vha;
+	vha->timer.function = (void (*)(unsigned long))func;
+	add_timer(&vha->timer);
+	vha->timer_active = 1;
+}
+
+static inline void
+qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
+{
+	/* Currently used for 82XX only. */
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_dbg(ql_dbg_timer, vha, 0x600d,
+		    "Device in a failed state, returning.\n");
+		return;
+	}
+
+	mod_timer(&vha->timer, jiffies + interval * HZ);
+}
+
+static __inline__ void
+qla2x00_stop_timer(scsi_qla_host_t *vha)
+{
+	del_timer_sync(&vha->timer);
+	vha->timer_active = 0;
+}
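+
+/*
+ * Editorial note: init_timer() plus a cast-through function pointer is the
+ * pre-timer_setup() API this baseline targets.  On kernels that provide
+ * timer_setup()/from_timer() the same start routine could be written
+ * without the cast, along these lines (sketch only; qla2x00_timer_fn is a
+ * hypothetical name):
+ *
+ *	timer_setup(&vha->timer, qla2x00_timer_fn, 0);
+ *	vha->timer.expires = jiffies + interval * HZ;
+ *	add_timer(&vha->timer);
+ *
+ * with qla2x00_timer_fn(struct timer_list *t) recovering the host via
+ * from_timer(vha, t, timer).
+ */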
+
+static int qla2x00_do_dpc(void *data);
+
+static void qla2x00_rst_aen(scsi_qla_host_t *);
+
+static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
+	struct req_que **, struct rsp_que **);
+static void qla2x00_free_fw_dump(struct qla_hw_data *);
+static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+	struct qla_qpair *qpair);
+
+/* -------------------------------------------------------------------------- */
+static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
+    struct rsp_que *rsp)
+{
+	struct qla_hw_data *ha = vha->hw;
+	rsp->qpair = ha->base_qpair;
+	rsp->req = req;
+	ha->base_qpair->req = req;
+	ha->base_qpair->rsp = rsp;
+	ha->base_qpair->vha = vha;
+	ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
+	ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
+	ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+	INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+	INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
+	ha->base_qpair->enable_class_2 = ql2xenableclass2;
+	/* init qpair to this cpu. Will adjust at run time. */
+	qla_cpu_update(rsp->qpair, raw_smp_processor_id());
+	ha->base_qpair->pdev = ha->pdev;
+
+	if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+		ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
+}
+
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+				struct rsp_que *rsp)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
+				GFP_KERNEL);
+	if (!ha->req_q_map) {
+		ql_log(ql_log_fatal, vha, 0x003b,
+		    "Unable to allocate memory for request queue ptrs.\n");
+		goto fail_req_map;
+	}
+
+	ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
+				GFP_KERNEL);
+	if (!ha->rsp_q_map) {
+		ql_log(ql_log_fatal, vha, 0x003c,
+		    "Unable to allocate memory for response queue ptrs.\n");
+		goto fail_rsp_map;
+	}
+
+	ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+	if (ha->base_qpair == NULL) {
+		ql_log(ql_log_warn, vha, 0x00e0,
+		    "Failed to allocate base queue pair memory.\n");
+		goto fail_base_qpair;
+	}
+
+	qla_init_base_qpair(vha, req, rsp);
+
+	if (ql2xmqsupport && ha->max_qpairs) {
+		ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+			GFP_KERNEL);
+		if (!ha->queue_pair_map) {
+			ql_log(ql_log_fatal, vha, 0x0180,
+			    "Unable to allocate memory for queue pair ptrs.\n");
+			goto fail_qpair_map;
+		}
+	}
+
+	/*
+	 * Make sure we record at least the request and response queue zero in
+	 * case we need to free them if part of the probe fails.
+	 */
+	ha->rsp_q_map[0] = rsp;
+	ha->req_q_map[0] = req;
+	set_bit(0, ha->rsp_qid_map);
+	set_bit(0, ha->req_qid_map);
+	return 0;
+
+fail_qpair_map:
+	kfree(ha->base_qpair);
+	ha->base_qpair = NULL;
+fail_base_qpair:
+	kfree(ha->rsp_q_map);
+	ha->rsp_q_map = NULL;
+fail_rsp_map:
+	kfree(ha->req_q_map);
+	ha->req_q_map = NULL;
+fail_req_map:
+	return -ENOMEM;
+}
+
+static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
+{
+	if (IS_QLAFX00(ha)) {
+		if (req && req->ring_fx00)
+			dma_free_coherent(&ha->pdev->dev,
+			    (req->length_fx00 + 1) * sizeof(request_t),
+			    req->ring_fx00, req->dma_fx00);
+	} else if (req && req->ring)
+		dma_free_coherent(&ha->pdev->dev,
+		(req->length + 1) * sizeof(request_t),
+		req->ring, req->dma);
+
+	if (req)
+		kfree(req->outstanding_cmds);
+
+	kfree(req);
+}
+
+static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+	if (IS_QLAFX00(ha)) {
+		if (rsp && rsp->ring_fx00)
+			dma_free_coherent(&ha->pdev->dev,
+			    (rsp->length_fx00 + 1) * sizeof(request_t),
+			    rsp->ring_fx00, rsp->dma_fx00);
+	} else if (rsp && rsp->ring) {
+		dma_free_coherent(&ha->pdev->dev,
+		(rsp->length + 1) * sizeof(response_t),
+		rsp->ring, rsp->dma);
+	}
+	kfree(rsp);
+}
+
+static void qla2x00_free_queues(struct qla_hw_data *ha)
+{
+	struct req_que *req;
+	struct rsp_que *rsp;
+	int cnt;
+	unsigned long flags;
+
+	if (ha->queue_pair_map) {
+		kfree(ha->queue_pair_map);
+		ha->queue_pair_map = NULL;
+	}
+	if (ha->base_qpair) {
+		kfree(ha->base_qpair);
+		ha->base_qpair = NULL;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+		if (!test_bit(cnt, ha->req_qid_map))
+			continue;
+
+		req = ha->req_q_map[cnt];
+		clear_bit(cnt, ha->req_qid_map);
+		ha->req_q_map[cnt] = NULL;
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		qla2x00_free_req_que(ha, req);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(ha->req_q_map);
+	ha->req_q_map = NULL;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+		if (!test_bit(cnt, ha->rsp_qid_map))
+			continue;
+
+		rsp = ha->rsp_q_map[cnt];
+		clear_bit(cnt, ha->rsp_qid_map);
+		ha->rsp_q_map[cnt] = NULL;
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		qla2x00_free_rsp_que(ha, rsp);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(ha->rsp_q_map);
+	ha->rsp_q_map = NULL;
+}
+
+static char *
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+	struct qla_hw_data *ha = vha->hw;
+	static char *pci_bus_modes[] = {
+		"33", "66", "100", "133",
+	};
+	uint16_t pci_bus;
+
+	strcpy(str, "PCI");
+	pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
+	if (pci_bus) {
+		strcat(str, "-X (");
+		strcat(str, pci_bus_modes[pci_bus]);
+	} else {
+		pci_bus = (ha->pci_attr & BIT_8) >> 8;
+		strcat(str, " (");
+		strcat(str, pci_bus_modes[pci_bus]);
+	}
+	strcat(str, " MHz)");
+
+	return (str);
+}
+
+static char *
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+	static char *pci_bus_modes[] = { "33", "66", "100", "133", };
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t pci_bus;
+
+	if (pci_is_pcie(ha->pdev)) {
+		char lwstr[6];
+		uint32_t lstat, lspeed, lwidth;
+
+		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+		lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
+
+		strcpy(str, "PCIe (");
+		switch (lspeed) {
+		case 1:
+			strcat(str, "2.5GT/s ");
+			break;
+		case 2:
+			strcat(str, "5.0GT/s ");
+			break;
+		case 3:
+			strcat(str, "8.0GT/s ");
+			break;
+		default:
+			strcat(str, "<unknown> ");
+			break;
+		}
+		snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
+		strcat(str, lwstr);
+
+		return str;
+	}
+
+	strcpy(str, "PCI");
+	pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
+	if (pci_bus == 0 || pci_bus == 8) {
+		strcat(str, " (");
+		strcat(str, pci_bus_modes[pci_bus >> 3]);
+	} else {
+		strcat(str, "-X ");
+		if (pci_bus & BIT_2)
+			strcat(str, "Mode 2");
+		else
+			strcat(str, "Mode 1");
+		strcat(str, " (");
+		strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
+	}
+	strcat(str, " MHz)");
+
+	return str;
+}
+
+static char *
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
+{
+	char un_str[10];
+	struct qla_hw_data *ha = vha->hw;
+
+	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
+	    ha->fw_minor_version, ha->fw_subminor_version);
+
+	if (ha->fw_attributes & BIT_9) {
+		strcat(str, "FLX");
+		return (str);
+	}
+
+	switch (ha->fw_attributes & 0xFF) {
+	case 0x7:
+		strcat(str, "EF");
+		break;
+	case 0x17:
+		strcat(str, "TP");
+		break;
+	case 0x37:
+		strcat(str, "IP");
+		break;
+	case 0x77:
+		strcat(str, "VI");
+		break;
+	default:
+		sprintf(un_str, "(%x)", ha->fw_attributes);
+		strcat(str, un_str);
+		break;
+	}
+	if (ha->fw_attributes & 0x100)
+		strcat(str, "X");
+
+	return (str);
+}
+
+static char *
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
+	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
+	return str;
+}
+
+void
+qla2x00_sp_free_dma(void *ptr)
+{
+	srb_t *sp = ptr;
+	struct qla_hw_data *ha = sp->vha->hw;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	void *ctx = GET_CMD_CTX_SP(sp);
+
+	if (sp->flags & SRB_DMA_VALID) {
+		scsi_dma_unmap(cmd);
+		sp->flags &= ~SRB_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+	}
+
+	if (!ctx)
+		goto end;
+
+	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+		/* List assured to be having elements */
+		qla2x00_clean_dsd_pool(ha, ctx);
+		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+		struct crc_context *ctx0 = ctx;
+
+		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
+		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+		struct ct6_dsd *ctx1 = ctx;
+
+		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+		    ctx1->fcp_cmnd_dma);
+		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+		mempool_free(ctx1, ha->ctx_mempool);
+	}
+
+end:
+	if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
+		CMD_SP(cmd) = NULL;
+		qla2x00_rel_sp(sp);
+	}
+}
+
+void
+qla2x00_sp_compl(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+	cmd->result = res;
+
+	if (atomic_read(&sp->ref_count) == 0) {
+		ql_dbg(ql_dbg_io, sp->vha, 0x3015,
+		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+		    sp, GET_CMD_SP(sp));
+		if (ql2xextended_error_logging & ql_dbg_io)
+			WARN_ON(atomic_read(&sp->ref_count) == 0);
+		return;
+	}
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	sp->free(sp);
+	cmd->scsi_done(cmd);
+}
+
+void
+qla2xxx_qpair_sp_free_dma(void *ptr)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	void *ctx = GET_CMD_CTX_SP(sp);
+
+	if (sp->flags & SRB_DMA_VALID) {
+		scsi_dma_unmap(cmd);
+		sp->flags &= ~SRB_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+		dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+		sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+	}
+
+	if (!ctx)
+		goto end;
+
+	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+		/* List assured to be having elements */
+		qla2x00_clean_dsd_pool(ha, ctx);
+		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+	}
+
+	if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+		struct crc_context *ctx0 = ctx;
+
+		dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
+		sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+	}
+
+	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+		struct ct6_dsd *ctx1 = ctx;
+
+		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+		    ctx1->fcp_cmnd_dma);
+		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+		mempool_free(ctx1, ha->ctx_mempool);
+	}
+end:
+	CMD_SP(cmd) = NULL;
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+	cmd->result = res;
+
+	if (atomic_read(&sp->ref_count) == 0) {
+		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+		    sp, GET_CMD_SP(sp));
+		if (ql2xextended_error_logging & ql_dbg_io)
+			WARN_ON(atomic_read(&sp->ref_count) == 0);
+		return;
+	}
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	sp->free(sp);
+	cmd->scsi_done(cmd);
+}
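+
+/*
+ * Editorial note on the completion pattern above: an srb starts life with
+ * ref_count = 1 (set in queuecommand), and the abort path takes an extra
+ * reference via sp_get(), so the normal completion and the error handler
+ * can both call sp->done() safely.  Whichever caller's
+ * atomic_dec_and_test() drops the count to zero frees the srb and invokes
+ * ->scsi_done(), so the SCSI command is completed exactly once.
+ */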
+
+/* If we are SP1 here, we need to still take and release the host_lock as SP1
+ * does not have the changes necessary to avoid taking host->host_lock.
+ */
+static int
+qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	srb_t *sp;
+	int rval;
+	struct qla_qpair *qpair = NULL;
+	uint32_t tag;
+	uint16_t hwq;
+
+	if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto qc24_fail_command;
+	}
+
+	if (ha->mqenable) {
+		if (shost_use_blk_mq(vha->host)) {
+			tag = blk_mq_unique_tag(cmd->request);
+			hwq = blk_mq_unique_tag_to_hwq(tag);
+			qpair = ha->queue_pair_map[hwq];
+		} else if (vha->vp_idx && vha->qpair) {
+			qpair = vha->qpair;
+		}
+
+		if (qpair)
+			return qla2xxx_mqueuecommand(host, cmd, qpair);
+	}
+
+	if (ha->flags.eeh_busy) {
+		if (ha->flags.pci_channel_io_perm_failure) {
+			ql_dbg(ql_dbg_aer, vha, 0x9010,
+			    "PCI Channel IO permanent failure, exiting "
+			    "cmd=%p.\n", cmd);
+			cmd->result = DID_NO_CONNECT << 16;
+		} else {
+			ql_dbg(ql_dbg_aer, vha, 0x9011,
+			    "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
+			cmd->result = DID_REQUEUE << 16;
+		}
+		goto qc24_fail_command;
+	}
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		cmd->result = rval;
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
+		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+		    cmd, rval);
+		goto qc24_fail_command;
+	}
+
+	if (!vha->flags.difdix_supported &&
+	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+		ql_dbg(ql_dbg_io, vha, 0x3004,
+		    "DIF Cap not reg, fail DIF capable cmds:%p.\n",
+		    cmd);
+		cmd->result = DID_NO_CONNECT << 16;
+		goto qc24_fail_command;
+	}
+
+	if (!fcport) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto qc24_fail_command;
+	}
+
+	if (atomic_read(&fcport->state) != FCS_ONLINE) {
+		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+			ql_dbg(ql_dbg_io, vha, 0x3005,
+			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
+			    atomic_read(&fcport->state),
+			    atomic_read(&base_vha->loop_state));
+			cmd->result = DID_NO_CONNECT << 16;
+			goto qc24_fail_command;
+		}
+		goto qc24_target_busy;
+	}
+
+	/*
+	 * Return target busy if we've received a non-zero retry_delay_timer
+	 * in a FCP_RSP.
+	 */
+	if (fcport->retry_delay_timestamp == 0) {
+		/* retry delay not set */
+	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
+		fcport->retry_delay_timestamp = 0;
+	else
+		goto qc24_target_busy;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+	if (!sp)
+		goto qc24_host_busy;
+
+	sp->u.scmd.cmd = cmd;
+	sp->type = SRB_SCSI_CMD;
+	atomic_set(&sp->ref_count, 1);
+	CMD_SP(cmd) = (void *)sp;
+	sp->free = qla2x00_sp_free_dma;
+	sp->done = qla2x00_sp_compl;
+
+	rval = ha->isp_ops->start_scsi(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
+		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+		goto qc24_host_busy_free_sp;
+	}
+
+	return 0;
+
+qc24_host_busy_free_sp:
+	sp->free(sp);
+
+qc24_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+	cmd->scsi_done(cmd);
+
+	return 0;
+}
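+
+/*
+ * Editorial note: blk_mq_unique_tag() packs the hardware-queue index into
+ * the upper 16 bits of the returned tag, so the MQ dispatch above reduces
+ * to
+ *
+ *	tag = blk_mq_unique_tag(cmd->request);
+ *	hwq = blk_mq_unique_tag_to_hwq(tag);	(i.e. tag >> 16)
+ *	qpair = ha->queue_pair_map[hwq];
+ *
+ * which is how a submission lands on the qpair that matches its blk-mq
+ * hardware context.
+ */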
+
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+    struct qla_qpair *qpair)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+	srb_t *sp;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		cmd->result = rval;
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+		    "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+		    cmd, rval);
+		goto qc24_fail_command;
+	}
+
+	if (!fcport) {
+		cmd->result = DID_NO_CONNECT << 16;
+		goto qc24_fail_command;
+	}
+
+	if (atomic_read(&fcport->state) != FCS_ONLINE) {
+		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+			ql_dbg(ql_dbg_io, vha, 0x3077,
+			    "Returning DNC, fcport_state=%d loop_state=%d.\n",
+			    atomic_read(&fcport->state),
+			    atomic_read(&base_vha->loop_state));
+			cmd->result = DID_NO_CONNECT << 16;
+			goto qc24_fail_command;
+		}
+		goto qc24_target_busy;
+	}
+
+	/*
+	 * Return target busy if we've received a non-zero retry_delay_timer
+	 * in a FCP_RSP.
+	 */
+	if (fcport->retry_delay_timestamp == 0) {
+		/* retry delay not set */
+	} else if (time_after(jiffies, fcport->retry_delay_timestamp))
+		fcport->retry_delay_timestamp = 0;
+	else
+		goto qc24_target_busy;
+
+	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+	if (!sp)
+		goto qc24_host_busy;
+
+	sp->u.scmd.cmd = cmd;
+	sp->type = SRB_SCSI_CMD;
+	atomic_set(&sp->ref_count, 1);
+	CMD_SP(cmd) = (void *)sp;
+	sp->free = qla2xxx_qpair_sp_free_dma;
+	sp->done = qla2xxx_qpair_sp_compl;
+	sp->qpair = qpair;
+
+	rval = ha->isp_ops->start_scsi_mq(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+		if (rval == QLA_INTERFACE_ERROR)
+			goto qc24_fail_command;
+		goto qc24_host_busy_free_sp;
+	}
+
+	return 0;
+
+qc24_host_busy_free_sp:
+	sp->free(sp);
+
+qc24_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+	cmd->scsi_done(cmd);
+
+	return 0;
+}
+
+/*
+ * qla2x00_eh_wait_on_command
+ *    Waits for the command to be returned by the Firmware for some
+ *    max time.
+ *
+ * Input:
+ *    cmd = Scsi Command to wait on.
+ *
+ * Return:
+ *    Not Found : 0
+ *    Found : 1
+ */
+static int
+qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
+{
+#define ABORT_POLLING_PERIOD	1000
+#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))
+	unsigned long wait_iter = ABORT_WAIT_ITER;
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
+	int ret = QLA_SUCCESS;
+
+	if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8005,
+		    "Return:eh_wait.\n");
+		return ret;
+	}
+
+	while (CMD_SP(cmd) && wait_iter--) {
+		msleep(ABORT_POLLING_PERIOD);
+	}
+	if (CMD_SP(cmd))
+		ret = QLA_FUNCTION_FAILED;
+
+	return ret;
+}
+
+/*
+ * qla2x00_wait_for_hba_online
+ *    Wait until the HBA is online: the adapter either completes up to
+ *    MAX_RETRIES_OF_ISP_ABORT reset attempts or is finally disabled
+ *    (marked offline).
+ *
+ * Input:
+ *     ha - pointer to host adapter structure
+ *
+ * Note:
+ *    This routine sleeps; release any spinlock held
+ *    before calling it.
+ *
+ * Return:
+ *    Success (Adapter is online) : 0
+ *    Failed  (Adapter is offline/disabled) : 1
+ */
+int
+qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
+{
+	int		return_status;
+	unsigned long	wait_online;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    ha->dpc_active) && time_before(jiffies, wait_online)) {
+
+		msleep(1000);
+	}
+	if (base_vha->flags.online)
+		return_status = QLA_SUCCESS;
+	else
+		return_status = QLA_FUNCTION_FAILED;
+
+	return (return_status);
+}
+
+static inline int test_fcport_count(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	int res;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	ql_dbg(ql_dbg_init, vha, 0x00ec,
+	    "tgt %p, fcport_count=%d\n",
+	    vha, vha->fcport_count);
+	res = (vha->fcport_count == 0);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	return res;
+}
+
+/*
+ * qla2x00_wait_for_sess_deletion can only be called from remove_one();
+ * it depends on the UNLOADING flag to stop device discovery.
+ */
+void
+qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
+{
+	qla2x00_mark_all_devices_lost(vha, 0);
+
+	wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
+}
+
+/*
+ * qla2x00_wait_for_hba_ready
+ *    Wait until the HBA is ready before driver unload.
+ *
+ * Input:
+ *     ha - pointer to host adapter structure
+ *
+ * Note:
+ *    This routine sleeps; release any spinlock held
+ *    before calling it.
+ *
+ */
+static void
+qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
+		ha->flags.mbox_busy) ||
+	       test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+	       test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
+		if (test_bit(UNLOADING, &base_vha->dpc_flags))
+			break;
+		msleep(1000);
+	}
+}
+
+int
+qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
+{
+	int		return_status;
+	unsigned long	wait_reset;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
+	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    ha->dpc_active) && time_before(jiffies, wait_reset)) {
+
+		msleep(1000);
+
+		if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    ha->flags.chip_reset_done)
+			break;
+	}
+	if (ha->flags.chip_reset_done)
+		return_status = QLA_SUCCESS;
+	else
+		return_status = QLA_FUNCTION_FAILED;
+
+	return return_status;
+}
+
+static void
+sp_get(struct srb *sp)
+{
+	atomic_inc(&sp->ref_count);
+}
+
+#define ISP_REG_DISCONNECT 0xffffffffU
+/**************************************************************************
+* qla2x00_isp_reg_stat
+*
+* Description:
+*	Read the host status register of ISP before aborting the command.
+*
+* Input:
+*	ha = pointer to host adapter structure.
+*
+*
+* Returns:
+*	true if the host status register reads back as all ones
+*	(register disconnect); false otherwise.
+**************************************************************************/
+static inline
+uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
+{
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+
+	if (IS_P3P_TYPE(ha))
+		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+	else
+		return ((RD_REG_DWORD(&reg->host_status)) ==
+			ISP_REG_DISCONNECT);
+}
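+
+/*
+ * Editorial note: an MMIO read from a PCIe device that has dropped off the
+ * bus completes with all ones, so comparing a register that can never
+ * legitimately read 0xffffffff against ISP_REG_DISCONNECT is a cheap
+ * "is the adapter still present" probe; the error handlers below use it
+ * before touching hardware.
+ */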
+
+/**************************************************************************
+* qla2xxx_eh_abort
+*
+* Description:
+*    The abort function will abort the specified command.
+*
+* Input:
+*    cmd = Linux SCSI command packet to be aborted.
+*
+* Returns:
+*    Either SUCCESS or FAILED.
+*
+* Note:
+*    Only return FAILED if command not returned by firmware.
+**************************************************************************/
+static int
+qla2xxx_eh_abort(struct scsi_cmnd *cmd)
+{
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	srb_t *sp;
+	int ret;
+	unsigned int id;
+	uint64_t lun;
+	unsigned long flags;
+	int rval, wait = 0;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (qla2x00_isp_reg_stat(ha)) {
+		ql_log(ql_log_info, vha, 0x8042,
+		    "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+	if (!CMD_SP(cmd))
+		return SUCCESS;
+
+	ret = fc_block_scsi_eh(cmd);
+	if (ret != 0)
+		return ret;
+	ret = SUCCESS;
+
+	id = cmd->device->id;
+	lun = cmd->device->lun;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	sp = (srb_t *) CMD_SP(cmd);
+	if (!sp) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return SUCCESS;
+	}
+
+	ql_dbg(ql_dbg_taskm, vha, 0x8002,
+	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
+	    vha->host_no, id, lun, sp, cmd, sp->handle);
+
+	/* Get a reference to the sp and drop the lock.*/
+	sp_get(sp);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	rval = ha->isp_ops->abort_command(sp);
+	if (rval) {
+		if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+			ret = SUCCESS;
+		else
+			ret = FAILED;
+
+		ql_dbg(ql_dbg_taskm, vha, 0x8003,
+		    "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
+	} else {
+		ql_dbg(ql_dbg_taskm, vha, 0x8004,
+		    "Abort command mbx success cmd=%p.\n", cmd);
+		wait = 1;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	sp->done(sp, 0);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/* Did the command return during mailbox execution? */
+	if (ret == FAILED && !CMD_SP(cmd))
+		ret = SUCCESS;
+
+	/* Wait for the command to be returned. */
+	if (wait) {
+		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x8006,
+			    "Abort handler timed out cmd=%p.\n", cmd);
+			ret = FAILED;
+		}
+	}
+
+	ql_log(ql_log_info, vha, 0x801c,
+	    "Abort command issued nexus=%ld:%d:%llu --  %d %x.\n",
+	    vha->host_no, id, lun, wait, ret);
+
+	return ret;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+	uint64_t l, enum nexus_wait_type type)
+{
+	int cnt, match, status;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+	srb_t *sp;
+	struct scsi_cmnd *cmd;
+
+	status = QLA_SUCCESS;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	req = vha->req;
+	for (cnt = 1; status == QLA_SUCCESS &&
+		cnt < req->num_outstanding_cmds; cnt++) {
+		sp = req->outstanding_cmds[cnt];
+		if (!sp)
+			continue;
+		if (sp->type != SRB_SCSI_CMD)
+			continue;
+		if (vha->vp_idx != sp->vha->vp_idx)
+			continue;
+		match = 0;
+		cmd = GET_CMD_SP(sp);
+		switch (type) {
+		case WAIT_HOST:
+			match = 1;
+			break;
+		case WAIT_TARGET:
+			match = cmd->device->id == t;
+			break;
+		case WAIT_LUN:
+			match = (cmd->device->id == t &&
+				cmd->device->lun == l);
+			break;
+		}
+		if (!match)
+			continue;
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		status = qla2x00_eh_wait_on_command(cmd);
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return status;
+}
+
+static char *reset_errors[] = {
+	"HBA not online",
+	"HBA not ready",
+	"Task management failed",
+	"Waiting for command completions",
+};
+
+static int
+__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
+    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
+{
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	int err;
+
+	if (!fcport)
+		return FAILED;
+
+	err = fc_block_scsi_eh(cmd);
+	if (err != 0)
+		return err;
+
+	ql_log(ql_log_info, vha, 0x8009,
+	    "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
+	    cmd->device->id, cmd->device->lun, cmd);
+
+	err = 0;
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x800a,
+		    "Wait for hba online failed for cmd=%p.\n", cmd);
+		goto eh_reset_failed;
+	}
+	err = 2;
+	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+		!= QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x800c,
+		    "do_reset failed for cmd=%p.\n", cmd);
+		goto eh_reset_failed;
+	}
+	err = 3;
+	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
+	    cmd->device->lun, type) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x800d,
+		    "wait for pending cmds failed for cmd=%p.\n", cmd);
+		goto eh_reset_failed;
+	}
+
+	ql_log(ql_log_info, vha, 0x800e,
+	    "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
+	    vha->host_no, cmd->device->id, cmd->device->lun, cmd);
+
+	return SUCCESS;
+
+eh_reset_failed:
+	ql_log(ql_log_info, vha, 0x800f,
+	    "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
+	    reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+	    cmd);
+	return FAILED;
+}
+
+static int
+qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
+{
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
+
+	if (qla2x00_isp_reg_stat(ha)) {
+		ql_log(ql_log_info, vha, 0x803e,
+		    "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
+	    ha->isp_ops->lun_reset);
+}
+
+static int
+qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
+{
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
+
+	if (qla2x00_isp_reg_stat(ha)) {
+		ql_log(ql_log_info, vha, 0x803f,
+		    "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
+	    ha->isp_ops->target_reset);
+}
+
+/**************************************************************************
+* qla2xxx_eh_bus_reset
+*
+* Description:
+*    The bus reset function will reset the bus and abort any executing
+*    commands.
+*
+* Input:
+*    cmd = Linux SCSI command packet of the command that cause the
+*          bus reset.
+*
+* Returns:
+*    SUCCESS/FAILURE (defined as macro in scsi.h).
+*
+**************************************************************************/
+static int
+qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
+{
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+	int ret = FAILED;
+	unsigned int id;
+	uint64_t lun;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (qla2x00_isp_reg_stat(ha)) {
+		ql_log(ql_log_info, vha, 0x8040,
+		    "PCI/Register disconnect, exiting.\n");
+		return FAILED;
+	}
+
+	id = cmd->device->id;
+	lun = cmd->device->lun;
+
+	if (!fcport)
+		return ret;
+
+	ret = fc_block_scsi_eh(cmd);
+	if (ret != 0)
+		return ret;
+	ret = FAILED;
+
+	ql_log(ql_log_info, vha, 0x8012,
+	    "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
+
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0x8013,
+		    "Wait for hba online failed board disabled.\n");
+		goto eh_bus_reset_done;
+	}
+
+	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+		ret = SUCCESS;
+
+	if (ret == FAILED)
+		goto eh_bus_reset_done;
+
+	/* Flush outstanding commands. */
+	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
+	    QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8014,
+		    "Wait for pending commands failed.\n");
+		ret = FAILED;
+	}
+
+eh_bus_reset_done:
+	ql_log(ql_log_warn, vha, 0x802b,
+	    "BUS RESET %s nexus=%ld:%d:%llu.\n",
+	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
+
+	return ret;
+}
+
+/**************************************************************************
+* qla2xxx_eh_host_reset
+*
+* Description:
+*    The reset function will reset the Adapter.
+*
+* Input:
+*      cmd = Linux SCSI command packet of the command that cause the
+*            adapter reset.
+*
+* Returns:
+*      Either SUCCESS or FAILED.
+*
+* Note:
+**************************************************************************/
+static int
+qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
+{
+	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+	struct qla_hw_data *ha = vha->hw;
+	int ret = FAILED;
+	unsigned int id;
+	uint64_t lun;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	if (qla2x00_isp_reg_stat(ha)) {
+		ql_log(ql_log_info, vha, 0x8041,
+		    "PCI/Register disconnect, exiting.\n");
+		schedule_work(&ha->board_disable);
+		return SUCCESS;
+	}
+
+	id = cmd->device->id;
+	lun = cmd->device->lun;
+
+	ql_log(ql_log_info, vha, 0x8018,
+	    "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
+
+	/*
+	 * No point in issuing another reset if one is active.  Also do not
+	 * attempt a reset if we are updating flash.
+	 */
+	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
+		goto eh_host_reset_lock;
+
+	if (vha != base_vha) {
+		if (qla2x00_vp_abort_isp(vha))
+			goto eh_host_reset_lock;
+	} else {
+		if (IS_P3P_TYPE(vha->hw)) {
+			if (!qla82xx_fcoe_ctx_reset(vha)) {
+				/* Ctx reset success */
+				ret = SUCCESS;
+				goto eh_host_reset_lock;
+			}
+			/* fall thru if ctx reset failed */
+		}
+		if (ha->wq)
+			flush_workqueue(ha->wq);
+
+		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+		if (ha->isp_ops->abort_isp(base_vha)) {
+			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+			/* failed. schedule dpc to try */
+			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+
+			if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0x802a,
+				    "wait for hba online failed.\n");
+				goto eh_host_reset_lock;
+			}
+		}
+		clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+	}
+
+	/* Wait for commands to be returned to the OS. */
+	if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
+		QLA_SUCCESS)
+		ret = SUCCESS;
+
+eh_host_reset_lock:
+	ql_log(ql_log_info, vha, 0x8017,
+	    "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
+	    (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
+
+	return ret;
+}
+
+/*
+* qla2x00_loop_reset
+*      Issue loop reset.
+*
+* Input:
+*      ha = adapter block pointer.
+*
+* Returns:
+*      0 = success
+*/
+int
+qla2x00_loop_reset(scsi_qla_host_t *vha)
+{
+	int ret;
+	struct fc_port *fcport;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLAFX00(ha))
+		return qlafx00_loop_reset(vha);
+
+	if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->port_type != FCT_TARGET)
+				continue;
+
+			ret = ha->isp_ops->target_reset(fcport, 0, 0);
+			if (ret != QLA_SUCCESS) {
+				ql_dbg(ql_dbg_taskm, vha, 0x802c,
+				    "Bus Reset failed: Reset=%d "
+				    "d_id=%x.\n", ret, fcport->d_id.b24);
+			}
+		}
+	}
+
+	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+		qla2x00_mark_all_devices_lost(vha, 0);
+		ret = qla2x00_full_login_lip(vha);
+		if (ret != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_taskm, vha, 0x802d,
+			    "full_login_lip=%d.\n", ret);
+		}
+	}
+
+	if (ha->flags.enable_lip_reset) {
+		ret = qla2x00_lip_reset(vha);
+		if (ret != QLA_SUCCESS)
+			ql_dbg(ql_dbg_taskm, vha, 0x802e,
+			    "lip_reset failed (%d).\n", ret);
+	}
+
+	/* Issue marker command only when we are going to start the I/O */
+	vha->marker_needed = 1;
+
+	return QLA_SUCCESS;
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+	int que, cnt, status;
+	unsigned long flags;
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_tgt_cmd *cmd;
+	uint8_t trace = 0;
+
+	if (!ha->req_q_map)
+		return;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (que = 0; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			continue;
+		if (!req->outstanding_cmds)
+			continue;
+		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+			sp = req->outstanding_cmds[cnt];
+			if (sp) {
+				req->outstanding_cmds[cnt] = NULL;
+				if (sp->cmd_type == TYPE_SRB) {
+					if (sp->type == SRB_NVME_CMD ||
+					    sp->type == SRB_NVME_LS) {
+						sp_get(sp);
+						spin_unlock_irqrestore(
+						    &ha->hardware_lock, flags);
+						qla_nvme_abort(ha, sp);
+						spin_lock_irqsave(
+						    &ha->hardware_lock, flags);
+					} else if (GET_CMD_SP(sp) &&
+					    !ha->flags.eeh_busy &&
+					    (!test_bit(ABORT_ISP_ACTIVE,
+						&vha->dpc_flags)) &&
+					    (sp->type == SRB_SCSI_CMD)) {
+						/*
+						 * Don't abort commands in
+						 * adapter during EEH
+						 * recovery as it's not
+						 * accessible/responding.
+						 *
+						 * Get a reference to the sp
+						 * and drop the lock. The
+						 * reference ensures this
+						 * sp->done() call and not the
+						 * call in qla2xxx_eh_abort()
+						 * ends the SCSI command (with
+						 * result 'res').
+						 */
+						sp_get(sp);
+						spin_unlock_irqrestore(
+						    &ha->hardware_lock, flags);
+						status = qla2xxx_eh_abort(
+						    GET_CMD_SP(sp));
+						spin_lock_irqsave(
+						    &ha->hardware_lock, flags);
+						/*
+						 * Get rid of extra reference
+						 * if immediate exit from
+						 * ql2xxx_eh_abort
+						 */
+						if (status == FAILED &&
+						    (qla2x00_isp_reg_stat(ha)))
+							atomic_dec(
+							    &sp->ref_count);
+					}
+					sp->done(sp, res);
+				} else {
+					if (!vha->hw->tgt.tgt_ops || !tgt ||
+					    qla_ini_mode_enabled(vha)) {
+						if (!trace)
+							ql_dbg(ql_dbg_tgt_mgt,
+							    vha, 0xf003,
+							    "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+							    vha->dpc_flags);
+						continue;
+					}
+					cmd = (struct qla_tgt_cmd *)sp;
+					qlt_abort_cmd_on_host_reset(cmd->vha,
+					    cmd);
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static int
+qla2xxx_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	sdev->hostdata = *(fc_port_t **)rport->dd_data;
+
+	return 0;
+}
+
+static int
+qla2xxx_slave_configure(struct scsi_device *sdev)
+{
+	scsi_qla_host_t *vha = shost_priv(sdev->host);
+	struct req_que *req = vha->req;
+
+	if (IS_T10_PI_CAPABLE(vha->hw))
+		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
+	scsi_change_queue_depth(sdev, req->max_q_depth);
+	return 0;
+}
+
+static void
+qla2xxx_slave_destroy(struct scsi_device *sdev)
+{
+	sdev->hostdata = NULL;
+}
+
+/**
+ * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
+ * @ha: HA context
+ *
+ * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
+ * supported addressing method.
+ */
+static void
+qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+{
+	/* Assume a 32bit DMA mask. */
+	ha->flags.enable_64bit_addressing = 0;
+
+	if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+		/* Any upper-dword bits set? */
+		if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
+		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+			/* Ok, a 64bit DMA mask is applicable. */
+			ha->flags.enable_64bit_addressing = 1;
+			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
+			return;
+		}
+	}
+
+	dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
+	pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+}
+
+static void
+qla2x00_enable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 1;
+	/* enable risc and host interrupts */
+	WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+	RD_REG_WORD(&reg->ictrl);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla2x00_disable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 0;
+	/* disable risc and host interrupts */
+	WRT_REG_WORD(&reg->ictrl, 0);
+	RD_REG_WORD(&reg->ictrl);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla24xx_enable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 1;
+	WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
+	RD_REG_DWORD(&reg->ictrl);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla24xx_disable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_NOPOLLING_TYPE(ha))
+		return;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 0;
+	WRT_REG_DWORD(&reg->ictrl, 0);
+	RD_REG_DWORD(&reg->ictrl);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+	resource_size_t pio;
+	uint16_t msix;
+
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (!(ha->bars & 1))
+		goto skip_pio;
+
+	/* We only need PIO for Flash operations on ISP2312 v2 chips. */
+	pio = pci_resource_start(ha->pdev, 0);
+	if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+		if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+			ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+			    "Invalid pci I/O region size (%s).\n",
+			    pci_name(ha->pdev));
+			pio = 0;
+		}
+	} else {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+		    "Region #0 not a PIO resource (%s).\n",
+		    pci_name(ha->pdev));
+		pio = 0;
+	}
+	ha->pio_address = pio;
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+	    "PIO address=%llu.\n",
+	    (unsigned long long)ha->pio_address);
+
+skip_pio:
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+		    "Region #1 not an MMIO resource (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+		    "Invalid PCI mem region size (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+	if (!ha->iobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+		    "Cannot remap MMIO (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Determine queue resources */
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+	ha->msix_count = QLA_BASE_VECTORS;
+	if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+		goto mqiobase_exit;
+
+	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+			pci_resource_len(ha->pdev, 3));
+	if (ha->mqiobase) {
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+		    "MQIO Base=%p.\n", ha->mqiobase);
+		/* Read MSIX vector size of the board */
+		pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+		ha->msix_count = msix + 1;
+		/* Max queues are bounded by available msix vectors */
+		/* MB interrupt uses 1 vector */
+		ha->max_req_queues = ha->msix_count - 1;
+		ha->max_rsp_queues = ha->max_req_queues;
+		/* Queue pairs is the max value minus the base queue pair */
+		ha->max_qpairs = ha->max_rsp_queues - 1;
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+		    "Max no of queue pairs: %d.\n", ha->max_qpairs);
+
+		ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+		    "MSI-X vector count: %d.\n", ha->msix_count);
+	} else
+		ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+		    "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+	    "MSIX Count: %d.\n", ha->msix_count);
+	return (0);
+
+iospace_error_exit:
+	return (-ENOMEM);
+}
+
+
+static int
+qla83xx_iospace_config(struct qla_hw_data *ha)
+{
+	uint16_t msix;
+
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+
+		goto iospace_error_exit;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
+		    "Region #0 not an MMIO resource (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
+		    "Invalid PCI mem region size (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
+	if (!ha->iobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
+		    "Cannot remap MMIO (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
+	/* 83XX 26XX always use MQ type access for queues
+	 * - mbar 2, a.k.a region 4 */
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+	ha->msix_count = QLA_BASE_VECTORS;
+	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
+			pci_resource_len(ha->pdev, 4));
+
+	if (!ha->mqiobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
+		    "BAR2/region4 not enabled\n");
+		goto mqiobase_exit;
+	}
+
+	ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
+			pci_resource_len(ha->pdev, 2));
+	if (ha->msixbase) {
+		/* Read MSIX vector size of the board */
+		pci_read_config_word(ha->pdev,
+		    QLA_83XX_PCI_MSIX_CONTROL, &msix);
+		ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
+		/*
+		 * By default, driver uses at least two msix vectors
+		 * (default & rspq)
+		 */
+		if (ql2xmqsupport) {
+			/* MB interrupt uses 1 vector */
+			ha->max_req_queues = ha->msix_count - 1;
+
+			/* ATIOQ needs 1 vector. That's 1 less QPair */
+			if (QLA_TGT_MODE_ENABLED())
+				ha->max_req_queues--;
+
+			ha->max_rsp_queues = ha->max_req_queues;
+
+			/* Queue pairs is the max value minus
+			 * the base queue pair */
+			ha->max_qpairs = ha->max_req_queues - 1;
+			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
+			    "Max no of queue pairs: %d.\n", ha->max_qpairs);
+		}
+		ql_log_pci(ql_log_info, ha->pdev, 0x011c,
+		    "MSI-X vector count: %d.\n", ha->msix_count);
+	} else
+		ql_log_pci(ql_log_info, ha->pdev, 0x011e,
+		    "BAR 1 not enabled.\n");
+
+mqiobase_exit:
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
+	    "MSIX Count: %d.\n", ha->msix_count);
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
+static struct isp_operations qla2100_isp_ops = {
+	.pci_config		= qla2100_pci_config,
+	.reset_chip		= qla2x00_reset_chip,
+	.chip_diag		= qla2x00_chip_diag,
+	.config_rings		= qla2x00_config_rings,
+	.reset_adapter		= qla2x00_reset_adapter,
+	.nvram_config		= qla2x00_nvram_config,
+	.update_fw_options	= qla2x00_update_fw_options,
+	.load_risc		= qla2x00_load_risc,
+	.pci_info_str		= qla2x00_pci_info_str,
+	.fw_version_str		= qla2x00_fw_version_str,
+	.intr_handler		= qla2100_intr_handler,
+	.enable_intrs		= qla2x00_enable_intrs,
+	.disable_intrs		= qla2x00_disable_intrs,
+	.abort_command		= qla2x00_abort_command,
+	.target_reset		= qla2x00_abort_target,
+	.lun_reset		= qla2x00_lun_reset,
+	.fabric_login		= qla2x00_login_fabric,
+	.fabric_logout		= qla2x00_fabric_logout,
+	.calc_req_entries	= qla2x00_calc_iocbs_32,
+	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
+	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
+	.read_nvram		= qla2x00_read_nvram_data,
+	.write_nvram		= qla2x00_write_nvram_data,
+	.fw_dump		= qla2100_fw_dump,
+	.beacon_on		= NULL,
+	.beacon_off		= NULL,
+	.beacon_blink		= NULL,
+	.read_optrom		= qla2x00_read_optrom_data,
+	.write_optrom		= qla2x00_write_optrom_data,
+	.get_flash_version	= qla2x00_get_flash_version,
+	.start_scsi		= qla2x00_start_scsi,
+	.start_scsi_mq          = NULL,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla2300_isp_ops = {
+	.pci_config		= qla2300_pci_config,
+	.reset_chip		= qla2x00_reset_chip,
+	.chip_diag		= qla2x00_chip_diag,
+	.config_rings		= qla2x00_config_rings,
+	.reset_adapter		= qla2x00_reset_adapter,
+	.nvram_config		= qla2x00_nvram_config,
+	.update_fw_options	= qla2x00_update_fw_options,
+	.load_risc		= qla2x00_load_risc,
+	.pci_info_str		= qla2x00_pci_info_str,
+	.fw_version_str		= qla2x00_fw_version_str,
+	.intr_handler		= qla2300_intr_handler,
+	.enable_intrs		= qla2x00_enable_intrs,
+	.disable_intrs		= qla2x00_disable_intrs,
+	.abort_command		= qla2x00_abort_command,
+	.target_reset		= qla2x00_abort_target,
+	.lun_reset		= qla2x00_lun_reset,
+	.fabric_login		= qla2x00_login_fabric,
+	.fabric_logout		= qla2x00_fabric_logout,
+	.calc_req_entries	= qla2x00_calc_iocbs_32,
+	.build_iocbs		= qla2x00_build_scsi_iocbs_32,
+	.prep_ms_iocb		= qla2x00_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla2x00_prep_ms_fdmi_iocb,
+	.read_nvram		= qla2x00_read_nvram_data,
+	.write_nvram		= qla2x00_write_nvram_data,
+	.fw_dump		= qla2300_fw_dump,
+	.beacon_on		= qla2x00_beacon_on,
+	.beacon_off		= qla2x00_beacon_off,
+	.beacon_blink		= qla2x00_beacon_blink,
+	.read_optrom		= qla2x00_read_optrom_data,
+	.write_optrom		= qla2x00_write_optrom_data,
+	.get_flash_version	= qla2x00_get_flash_version,
+	.start_scsi		= qla2x00_start_scsi,
+	.start_scsi_mq          = NULL,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla24xx_isp_ops = {
+	.pci_config		= qla24xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla24xx_nvram_config,
+	.update_fw_options	= qla24xx_update_fw_options,
+	.load_risc		= qla24xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla24xx_read_nvram_data,
+	.write_nvram		= qla24xx_write_nvram_data,
+	.fw_dump		= qla24xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla24xx_beacon_blink,
+	.read_optrom		= qla24xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_start_scsi,
+	.start_scsi_mq          = NULL,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla25xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla24xx_nvram_config,
+	.update_fw_options	= qla24xx_update_fw_options,
+	.load_risc		= qla24xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla25xx_read_nvram_data,
+	.write_nvram		= qla25xx_write_nvram_data,
+	.fw_dump		= qla25xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla24xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla81xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla81xx_update_fw_options,
+	.load_risc		= qla81xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= NULL,
+	.write_nvram		= NULL,
+	.fw_dump		= qla81xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla83xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla82xx_isp_ops = {
+	.pci_config		= qla82xx_pci_config,
+	.reset_chip		= qla82xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla82xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla24xx_update_fw_options,
+	.load_risc		= qla82xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla82xx_intr_handler,
+	.enable_intrs		= qla82xx_enable_intrs,
+	.disable_intrs		= qla82xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla24xx_read_nvram_data,
+	.write_nvram		= qla24xx_write_nvram_data,
+	.fw_dump		= qla82xx_fw_dump,
+	.beacon_on		= qla82xx_beacon_on,
+	.beacon_off		= qla82xx_beacon_off,
+	.beacon_blink		= NULL,
+	.read_optrom		= qla82xx_read_optrom_data,
+	.write_optrom		= qla82xx_write_optrom_data,
+	.get_flash_version	= qla82xx_get_flash_version,
+	.start_scsi             = qla82xx_start_scsi,
+	.start_scsi_mq          = NULL,
+	.abort_isp		= qla82xx_abort_isp,
+	.iospace_config		= qla82xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla8044_isp_ops = {
+	.pci_config		= qla82xx_pci_config,
+	.reset_chip		= qla82xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla82xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla24xx_update_fw_options,
+	.load_risc		= qla82xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla8044_intr_handler,
+	.enable_intrs		= qla82xx_enable_intrs,
+	.disable_intrs		= qla82xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= NULL,
+	.write_nvram		= NULL,
+	.fw_dump		= qla8044_fw_dump,
+	.beacon_on		= qla82xx_beacon_on,
+	.beacon_off		= qla82xx_beacon_off,
+	.beacon_blink		= NULL,
+	.read_optrom		= qla8044_read_optrom_data,
+	.write_optrom		= qla8044_write_optrom_data,
+	.get_flash_version	= qla82xx_get_flash_version,
+	.start_scsi             = qla82xx_start_scsi,
+	.start_scsi_mq          = NULL,
+	.abort_isp		= qla8044_abort_isp,
+	.iospace_config		= qla82xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qla83xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla81xx_update_fw_options,
+	.load_risc		= qla81xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= NULL,
+	.write_nvram		= NULL,
+	.fw_dump		= qla83xx_fw_dump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla83xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla83xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qlafx00_isp_ops = {
+	.pci_config		= qlafx00_pci_config,
+	.reset_chip		= qlafx00_soft_reset,
+	.chip_diag		= qlafx00_chip_diag,
+	.config_rings		= qlafx00_config_rings,
+	.reset_adapter		= qlafx00_soft_reset,
+	.nvram_config		= NULL,
+	.update_fw_options	= NULL,
+	.load_risc		= NULL,
+	.pci_info_str		= qlafx00_pci_info_str,
+	.fw_version_str		= qlafx00_fw_version_str,
+	.intr_handler		= qlafx00_intr_handler,
+	.enable_intrs		= qlafx00_enable_intrs,
+	.disable_intrs		= qlafx00_disable_intrs,
+	.abort_command		= qla24xx_async_abort_command,
+	.target_reset		= qlafx00_abort_target,
+	.lun_reset		= qlafx00_lun_reset,
+	.fabric_login		= NULL,
+	.fabric_logout		= NULL,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla24xx_read_nvram_data,
+	.write_nvram		= qla24xx_write_nvram_data,
+	.fw_dump		= NULL,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= NULL,
+	.read_optrom		= qla24xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qlafx00_start_scsi,
+	.start_scsi_mq          = NULL,
+	.abort_isp		= qlafx00_abort_isp,
+	.iospace_config		= qlafx00_iospace_config,
+	.initialize_adapter	= qlafx00_initialize_adapter,
+};
+
+static struct isp_operations qla27xx_isp_ops = {
+	.pci_config		= qla25xx_pci_config,
+	.reset_chip		= qla24xx_reset_chip,
+	.chip_diag		= qla24xx_chip_diag,
+	.config_rings		= qla24xx_config_rings,
+	.reset_adapter		= qla24xx_reset_adapter,
+	.nvram_config		= qla81xx_nvram_config,
+	.update_fw_options	= qla81xx_update_fw_options,
+	.load_risc		= qla81xx_load_risc,
+	.pci_info_str		= qla24xx_pci_info_str,
+	.fw_version_str		= qla24xx_fw_version_str,
+	.intr_handler		= qla24xx_intr_handler,
+	.enable_intrs		= qla24xx_enable_intrs,
+	.disable_intrs		= qla24xx_disable_intrs,
+	.abort_command		= qla24xx_abort_command,
+	.target_reset		= qla24xx_abort_target,
+	.lun_reset		= qla24xx_lun_reset,
+	.fabric_login		= qla24xx_login_fabric,
+	.fabric_logout		= qla24xx_fabric_logout,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= NULL,
+	.write_nvram		= NULL,
+	.fw_dump		= qla27xx_fwdump,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= qla83xx_beacon_blink,
+	.read_optrom		= qla25xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qla24xx_dif_start_scsi,
+	.start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
+	.abort_isp		= qla2x00_abort_isp,
+	.iospace_config		= qla83xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static inline void
+qla2x00_set_isp_flags(struct qla_hw_data *ha)
+{
+	ha->device_type = DT_EXTENDED_IDS;
+	switch (ha->pdev->device) {
+	case PCI_DEVICE_ID_QLOGIC_ISP2100:
+		ha->isp_type |= DT_ISP2100;
+		ha->device_type &= ~DT_EXTENDED_IDS;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2200:
+		ha->isp_type |= DT_ISP2200;
+		ha->device_type &= ~DT_EXTENDED_IDS;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2100;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2300:
+		ha->isp_type |= DT_ISP2300;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2312:
+		ha->isp_type |= DT_ISP2312;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2322:
+		ha->isp_type |= DT_ISP2322;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		if (ha->pdev->subsystem_vendor == 0x1028 &&
+		    ha->pdev->subsystem_device == 0x0170)
+			ha->device_type |= DT_OEM_001;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP6312:
+		ha->isp_type |= DT_ISP6312;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP6322:
+		ha->isp_type |= DT_ISP6322;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2300;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2422:
+		ha->isp_type |= DT_ISP2422;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2432:
+		ha->isp_type |= DT_ISP2432;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8432:
+		ha->isp_type |= DT_ISP8432;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP5422:
+		ha->isp_type |= DT_ISP5422;
+		ha->device_type |= DT_FWI2;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP5432:
+		ha->isp_type |= DT_ISP5432;
+		ha->device_type |= DT_FWI2;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2532:
+		ha->isp_type |= DT_ISP2532;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8001:
+		ha->isp_type |= DT_ISP8001;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8021:
+		ha->isp_type |= DT_ISP8021;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		/* Initialize 82XX ISP flags */
+		qla82xx_init_flags(ha);
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8044:
+		ha->isp_type |= DT_ISP8044;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		/* Initialize 82XX ISP flags */
+		qla82xx_init_flags(ha);
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2031:
+		ha->isp_type |= DT_ISP2031;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP8031:
+		ha->isp_type |= DT_ISP8031;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISPF001:
+		ha->isp_type |= DT_ISPFX00;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2071:
+		ha->isp_type |= DT_ISP2071;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2271:
+		ha->isp_type |= DT_ISP2271;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2261:
+		ha->isp_type |= DT_ISP2261;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->device_type |= DT_T10_PI;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
+	}
+
+	if (IS_QLA82XX(ha))
+		ha->port_no = ha->portnum & 1;
+	else {
+		/* Get adapter physical port no from interrupt pin register. */
+		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+		if (IS_QLA27XX(ha))
+			ha->port_no--;
+		else
+			ha->port_no = !(ha->port_no & 1);
+	}
+
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
+	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
+	    ha->device_type, ha->port_no, ha->fw_srisc_address);
+}
+
+static void
+qla2xxx_scan_start(struct Scsi_Host *shost)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	if (vha->hw->flags.running_gold_fw)
+		return;
+
+	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+	set_bit(RSCN_UPDATE, &vha->dpc_flags);
+	set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
+}
+
+static int
+qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	scsi_qla_host_t *vha = shost_priv(shost);
+
+	if (test_bit(UNLOADING, &vha->dpc_flags))
+		return 1;
+	if (!vha->host)
+		return 1;
+	if (time > vha->hw->loop_reset_delay * HZ)
+		return 1;
+
+	return atomic_read(&vha->loop_state) == LOOP_READY;
+}
+
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+	struct scsi_qla_host *vha = container_of(work,
+		struct scsi_qla_host, iocb_work);
+	int cnt = 0;
+
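+	/*
+	 * Drain the deferred-work list, but bound the number of passes
+	 * so a steady stream of new work items cannot monopolize this
+	 * work context.
+	 */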
+	while (!list_empty(&vha->work_list)) {
+		qla2x00_do_work(vha);
+		cnt++;
+		if (cnt > 10)
+			break;
+	}
+}
+
+/*
+ * PCI driver interface
+ */
+static int
+qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int	ret = -ENODEV;
+	struct Scsi_Host *host;
+	scsi_qla_host_t *base_vha = NULL;
+	struct qla_hw_data *ha;
+	char pci_info[30];
+	char fw_str[30], wq_name[30];
+	struct scsi_host_template *sht;
+	int bars, mem_only = 0;
+	uint16_t req_length = 0, rsp_length = 0;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	int i;
+
+	bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
+	sht = &qla2xxx_driver_template;
+	if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
+		bars = pci_select_bars(pdev, IORESOURCE_MEM);
+		mem_only = 1;
+		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
+		    "Mem only adapter.\n");
+	}
+	ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
+	    "Bars=%d.\n", bars);
+
+	if (mem_only) {
+		if (pci_enable_device_mem(pdev))
+			return ret;
+	} else {
+		if (pci_enable_device(pdev))
+			return ret;
+	}
+
+	/* This may fail but that's ok */
+	pci_enable_pcie_error_reporting(pdev);
+
+	ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
+	if (!ha) {
+		ql_log_pci(ql_log_fatal, pdev, 0x0009,
+		    "Unable to allocate memory for ha.\n");
+		goto disable_device;
+	}
+	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
+	    "Memory allocated for ha=%p.\n", ha);
+	ha->pdev = pdev;
+	INIT_LIST_HEAD(&ha->tgt.q_full_list);
+	spin_lock_init(&ha->tgt.q_full_lock);
+	spin_lock_init(&ha->tgt.sess_lock);
+	spin_lock_init(&ha->tgt.atio_lock);
+
+	atomic_set(&ha->nvme_active_aen_cnt, 0);
+
+	/* Clear our data area */
+	ha->bars = bars;
+	ha->mem_only = mem_only;
+	spin_lock_init(&ha->hardware_lock);
+	spin_lock_init(&ha->vport_slock);
+	mutex_init(&ha->selflogin_lock);
+	mutex_init(&ha->optrom_mutex);
+
+	/* Set ISP-type information. */
+	qla2x00_set_isp_flags(ha);
+
+	/* Set EEH reset type to fundamental if required by hba */
+	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
+	    IS_QLA83XX(ha) || IS_QLA27XX(ha))
+		pdev->needs_freset = 1;
+
+	ha->prev_topology = 0;
+	ha->init_cb_size = sizeof(init_cb_t);
+	ha->link_data_rate = PORT_SPEED_UNKNOWN;
+	ha->optrom_size = OPTROM_SIZE_2300;
+
+	/* Assign ISP specific operations. */
+	if (IS_QLA2100(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
+		req_length = REQUEST_ENTRY_CNT_2100;
+		rsp_length = RESPONSE_ENTRY_CNT_2100;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
+		ha->gid_list_info_size = 4;
+		ha->flash_conf_off = ~0;
+		ha->flash_data_off = ~0;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
+		ha->isp_ops = &qla2100_isp_ops;
+	} else if (IS_QLA2200(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
+		req_length = REQUEST_ENTRY_CNT_2200;
+		rsp_length = RESPONSE_ENTRY_CNT_2100;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
+		ha->gid_list_info_size = 4;
+		ha->flash_conf_off = ~0;
+		ha->flash_data_off = ~0;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
+		ha->isp_ops = &qla2100_isp_ops;
+	} else if (IS_QLA23XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_2200;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->gid_list_info_size = 6;
+		if (IS_QLA2322(ha) || IS_QLA6322(ha))
+			ha->optrom_size = OPTROM_SIZE_2322;
+		ha->flash_conf_off = ~0;
+		ha->flash_data_off = ~0;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
+		ha->isp_ops = &qla2300_isp_ops;
+	} else if (IS_QLA24XX_TYPE(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_24XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
+		ha->isp_ops = &qla24xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+	} else if (IS_QLA25XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_25XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla25xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+	} else if (IS_QLA81XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_24XX;
+		rsp_length = RESPONSE_ENTRY_CNT_2300;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_81XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla81xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
+	} else if (IS_QLA82XX(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_82XX;
+		rsp_length = RESPONSE_ENTRY_CNT_82XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_82XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla82xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+	} else if (IS_QLA8044(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_82XX;
+		rsp_length = RESPONSE_ENTRY_CNT_82XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_83XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla8044_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+		ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+		ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+	} else if (IS_QLA83XX(ha)) {
+		ha->portnum = PCI_FUNC(ha->pdev->devfn);
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_83XX;
+		rsp_length = RESPONSE_ENTRY_CNT_83XX;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_83XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla83xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
+	} else if (IS_QLAFX00(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
+		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
+		req_length = REQUEST_ENTRY_CNT_FX00;
+		rsp_length = RESPONSE_ENTRY_CNT_FX00;
+		ha->isp_ops = &qlafx00_isp_ops;
+		ha->port_down_retry_count = 30; /* default value */
+		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
+		ha->mr.fw_hbt_en = 1;
+		ha->mr.host_info_resend = false;
+		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
+	} else if (IS_QLA27XX(ha)) {
+		ha->portnum = PCI_FUNC(ha->pdev->devfn);
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT;
+		req_length = REQUEST_ENTRY_CNT_83XX;
+		rsp_length = RESPONSE_ENTRY_CNT_83XX;
+		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+		ha->gid_list_info_size = 8;
+		ha->optrom_size = OPTROM_SIZE_83XX;
+		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+		ha->isp_ops = &qla27xx_isp_ops;
+		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
+		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+		ha->nvram_conf_off = ~0;
+		ha->nvram_data_off = ~0;
+	}
+
+	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
+	    "mbx_count=%d, req_length=%d, "
+	    "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
+	    "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
+	    "max_fibre_devices=%d.\n",
+	    ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
+	    ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
+	    ha->nvram_npiv_size, ha->max_fibre_devices);
+	ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
+	    "isp_ops=%p, flash_conf_off=%d, "
+	    "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
+	    ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
+	    ha->nvram_conf_off, ha->nvram_data_off);
+
+	/* Configure PCI I/O space */
+	ret = ha->isp_ops->iospace_config(ha);
+	if (ret)
+		goto iospace_config_failed;
+
+	ql_log_pci(ql_log_info, pdev, 0x001d,
+	    "Found an ISP%04X irq %d iobase 0x%p.\n",
+	    pdev->device, pdev->irq, ha->iobase);
+	mutex_init(&ha->vport_lock);
+	mutex_init(&ha->mq_lock);
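+	/* Complete mbx_cmd_comp up front so the first mailbox command
+	 * sees the mailbox interface as free. */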
+	init_completion(&ha->mbx_cmd_comp);
+	complete(&ha->mbx_cmd_comp);
+	init_completion(&ha->mbx_intr_comp);
+	init_completion(&ha->dcbx_comp);
+	init_completion(&ha->lb_portup_comp);
+
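+	/* vp_idx 0 is reserved for the base (physical) port. */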
+	set_bit(0, (unsigned long *) ha->vp_idx_map);
+
+	qla2x00_config_dma_addressing(ha);
+	ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
+	    "64 Bit addressing is %s.\n",
+	    ha->flags.enable_64bit_addressing ? "enabled" :
+	    "disabled");
+	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
+	if (ret) {
+		ql_log_pci(ql_log_fatal, pdev, 0x0031,
+		    "Failed to allocate memory for adapter, aborting.\n");
+
+		goto probe_hw_failed;
+	}
+
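+	/* Honor the ql2xmaxqdepth module parameter when it fits in
+	 * 16 bits; otherwise keep the default queue depth. */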
+	req->max_q_depth = MAX_Q_DEPTH;
+	if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
+		req->max_q_depth = ql2xmaxqdepth;
+
+	base_vha = qla2x00_create_host(sht, ha);
+	if (!base_vha) {
+		ret = -ENOMEM;
+		goto probe_hw_failed;
+	}
+
+	pci_set_drvdata(pdev, base_vha);
+	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
+
+	host = base_vha->host;
+	base_vha->req = req;
+	if (IS_QLA2XXX_MIDTYPE(ha))
+		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+	else
+		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
+						base_vha->vp_idx;
+
+	/* Setup fcport template structure. */
+	ha->mr.fcport.vha = base_vha;
+	ha->mr.fcport.port_type = FCT_UNKNOWN;
+	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
+	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
+	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
+	ha->mr.fcport.scan_state = 1;
+
+	/* Set the SG table size based on ISP type */
+	if (!IS_FWI2_CAPABLE(ha)) {
+		if (IS_QLA2100(ha))
+			host->sg_tablesize = 32;
+	} else {
+		if (!IS_QLA82XX(ha))
+			host->sg_tablesize = QLA_SG_ALL;
+	}
+	host->max_id = ha->max_fibre_devices;
+	host->cmd_per_lun = 3;
+	host->unique_id = host->host_no;
+	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
+		host->max_cmd_len = 32;
+	else
+		host->max_cmd_len = MAX_CMDSZ;
+	host->max_channel = MAX_BUSES - 1;
+	/* Older HBAs support only 16-bit LUNs */
+	if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
+	    ql2xmaxlun > 0xffff)
+		host->max_lun = 0xffff;
+	else
+		host->max_lun = ql2xmaxlun;
+	host->transportt = qla2xxx_transport_template;
+	sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
+
+	ql_dbg(ql_dbg_init, base_vha, 0x0033,
+	    "max_id=%d this_id=%d "
+	    "cmd_per_lun=%d unique_id=%d max_cmd_len=%d max_channel=%d "
+	    "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
+	    host->this_id, host->cmd_per_lun, host->unique_id,
+	    host->max_cmd_len, host->max_channel, host->max_lun,
+	    host->transportt, sht->vendor_id);
+
+	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
+	/* Set up the irqs */
+	ret = qla2x00_request_irqs(ha, rsp);
+	if (ret)
+		goto probe_failed;
+
+	/* Alloc arrays of request and response ring ptrs */
+	ret = qla2x00_alloc_queues(ha, req, rsp);
+	if (ret) {
+		ql_log(ql_log_fatal, base_vha, 0x003d,
+		    "Failed to allocate memory for queue pointers..."
+		    "aborting.\n");
+		goto probe_failed;
+	}
+
+	if (ha->mqenable && shost_use_blk_mq(host)) {
+		/* number of hardware queues supported by blk/scsi-mq */
+		host->nr_hw_queues = ha->max_qpairs;
+
+		ql_dbg(ql_dbg_init, base_vha, 0x0192,
+			"blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+	} else
+		ql_dbg(ql_dbg_init, base_vha, 0x0193,
+			"blk/scsi-mq disabled.\n");
+
+	qlt_probe_one_stage1(base_vha, ha);
+
+	pci_save_state(pdev);
+
+	/* Assign back pointers */
+	rsp->req = req;
+	req->rsp = rsp;
+
+	if (IS_QLAFX00(ha)) {
+		ha->rsp_q_map[0] = rsp;
+		ha->req_q_map[0] = req;
+		set_bit(0, ha->req_qid_map);
+		set_bit(0, ha->rsp_qid_map);
+	}
+
+	/* Default to the FWI2 register layout; overridden below for
+	 * MQ-capable, FX00 and P3P adapters. */
+	req->req_q_in = &ha->iobase->isp24.req_q_in;
+	req->req_q_out = &ha->iobase->isp24.req_q_out;
+	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
+	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
+		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
+		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
+		rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
+	}
+
+	if (IS_QLAFX00(ha)) {
+		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
+		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
+		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
+		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
+	}
+
+	if (IS_P3P_TYPE(ha)) {
+		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
+		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
+		rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
+	}
+
+	ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
+	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+	ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
+	    "req->req_q_in=%p req->req_q_out=%p "
+	    "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+	    req->req_q_in, req->req_q_out,
+	    rsp->rsp_q_in, rsp->rsp_q_out);
+	ql_dbg(ql_dbg_init, base_vha, 0x003e,
+	    "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+	    ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+	ql_dbg(ql_dbg_init, base_vha, 0x003f,
+	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
+
+	if (ha->isp_ops->initialize_adapter(base_vha)) {
+		ql_log(ql_log_fatal, base_vha, 0x00d6,
+		    "Failed to initialize adapter - Adapter flags %x.\n",
+		    base_vha->device_flags);
+
+		if (IS_QLA82XX(ha)) {
+			qla82xx_idc_lock(ha);
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+				QLA8XXX_DEV_FAILED);
+			qla82xx_idc_unlock(ha);
+			ql_log(ql_log_fatal, base_vha, 0x00d7,
+			    "HW State: FAILED.\n");
+		} else if (IS_QLA8044(ha)) {
+			qla8044_idc_lock(ha);
+			qla8044_wr_direct(base_vha,
+				QLA8044_CRB_DEV_STATE_INDEX,
+				QLA8XXX_DEV_FAILED);
+			qla8044_idc_unlock(ha);
+			ql_log(ql_log_fatal, base_vha, 0x0150,
+			    "HW State: FAILED.\n");
+		}
+
+		ret = -ENODEV;
+		goto probe_failed;
+	}
+
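+	/* Leave some request-queue entries as headroom for
+	 * driver-internal IOCBs rather than exposing the whole ring
+	 * to the midlayer. */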
+	if (IS_QLAFX00(ha))
+		host->can_queue = QLAFX00_MAX_CANQUEUE;
+	else
+		host->can_queue = req->num_outstanding_cmds - 10;
+
+	ql_dbg(ql_dbg_init, base_vha, 0x0032,
+	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+	    host->can_queue, base_vha->req,
+	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+
+	ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
+	if (unlikely(!ha->wq)) {
+		ret = -ENOMEM;
+		goto probe_failed;
+	}
+
+	if (ha->mqenable) {
+		bool mq = false;
+		bool startit = false;
+
+		if (QLA_TGT_MODE_ENABLED()) {
+			mq = true;
+			startit = false;
+		}
+
+		if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
+		    shost_use_blk_mq(host)) {
+			mq = true;
+			startit = true;
+		}
+
+		if (mq) {
+			/* Create start of day qpairs for Block MQ */
+			for (i = 0; i < ha->max_qpairs; i++)
+				qla2xxx_create_qpair(base_vha, 5, 0, startit);
+		}
+	}
+
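+	/* The limited "gold" (recovery) firmware image has no use for
+	 * the DPC thread or the IDC work queues set up below. */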
+	if (ha->flags.running_gold_fw)
+		goto skip_dpc;
+
+	/*
+	 * Startup the kernel thread for this host adapter
+	 */
+	ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
+	    "%s_dpc", base_vha->host_str);
+	if (IS_ERR(ha->dpc_thread)) {
+		ql_log(ql_log_fatal, base_vha, 0x00ed,
+		    "Failed to start DPC thread.\n");
+		ret = PTR_ERR(ha->dpc_thread);
+		ha->dpc_thread = NULL;
+		goto probe_failed;
+	}
+	ql_dbg(ql_dbg_init, base_vha, 0x00ee,
+	    "DPC thread started successfully.\n");
+
+	/*
+	 * If we're not coming up in initiator mode, we might sit for
+	 * a while without waking up the dpc thread, which leads to a
+	 * stuck process warning.  So just kick the dpc once here and
+	 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+	 */
+	qla2xxx_wake_dpc(base_vha);
+
+	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
+
+	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
+		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
+		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
+		INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
+
+		sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
+		ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
+		INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
+		INIT_WORK(&ha->idc_state_handler,
+		    qla83xx_idc_state_handler_work);
+		INIT_WORK(&ha->nic_core_unrecoverable,
+		    qla83xx_nic_core_unrecoverable_work);
+	}
+
+skip_dpc:
+	list_add_tail(&base_vha->list, &ha->vp_list);
+	base_vha->host->irq = ha->pdev->irq;
+
+	/* Initialize the timer */
+	qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
+	ql_dbg(ql_dbg_init, base_vha, 0x00ef,
+	    "Started qla2x00_timer with "
+	    "interval=%d.\n", WATCH_INTERVAL);
+	ql_dbg(ql_dbg_init, base_vha, 0x00f0,
+	    "Detected hba at address=%p.\n",
+	    ha);
+
+	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+		if (ha->fw_attributes & BIT_4) {
+			int prot = 0, guard;
+			base_vha->flags.difdix_supported = 1;
+			ql_dbg(ql_dbg_init, base_vha, 0x00f1,
+			    "Registering for DIF/DIX type 1 and 3 protection.\n");
+			if (ql2xenabledif == 1)
+				prot = SHOST_DIX_TYPE0_PROTECTION;
+			scsi_host_set_prot(host,
+			    prot | SHOST_DIF_TYPE1_PROTECTION
+			    | SHOST_DIF_TYPE2_PROTECTION
+			    | SHOST_DIF_TYPE3_PROTECTION
+			    | SHOST_DIX_TYPE1_PROTECTION
+			    | SHOST_DIX_TYPE2_PROTECTION
+			    | SHOST_DIX_TYPE3_PROTECTION);
+
+			guard = SHOST_DIX_GUARD_CRC;
+
+			if (IS_PI_IPGUARD_CAPABLE(ha) &&
+			    (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
+				guard |= SHOST_DIX_GUARD_IP;
+
+			scsi_host_set_guard(host, guard);
+		} else
+			base_vha->flags.difdix_supported = 0;
+	}
+
+	ha->isp_ops->enable_intrs(ha);
+
+	if (IS_QLAFX00(ha)) {
+		ret = qlafx00_fx_disc(base_vha,
+			&base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+		host->sg_tablesize = (ha->mr.extended_io_enabled) ?
+		    QLA_SG_ALL : 128;
+	}
+
+	ret = scsi_add_host(host, &pdev->dev);
+	if (ret)
+		goto probe_failed;
+
+	base_vha->flags.init_done = 1;
+	base_vha->flags.online = 1;
+	ha->prev_minidump_failed = 0;
+
+	ql_dbg(ql_dbg_init, base_vha, 0x00f2,
+	    "Init done and hba is online.\n");
+
+	if (qla_ini_mode_enabled(base_vha) ||
+		qla_dual_mode_enabled(base_vha))
+		scsi_scan_host(host);
+	else
+		ql_dbg(ql_dbg_init, base_vha, 0x0122,
+			"skipping scsi_scan_host() for non-initiator port\n");
+
+	qla2x00_alloc_sysfs_attr(base_vha);
+
+	if (IS_QLAFX00(ha)) {
+		ret = qlafx00_fx_disc(base_vha,
+			&base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
+
+		/* Register system information */
+		ret =  qlafx00_fx_disc(base_vha,
+			&base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
+	}
+
+	qla2x00_init_host_attr(base_vha);
+
+	qla2x00_dfs_setup(base_vha);
+
+	ql_log(ql_log_info, base_vha, 0x00fb,
+	    "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
+	ql_log(ql_log_info, base_vha, 0x00fc,
+	    "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
+	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
+	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
+	    base_vha->host_no,
+	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
+
+	qlt_add_target(ha, base_vha);
+
+	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
+
+	if (test_bit(UNLOADING, &base_vha->dpc_flags))
+		return -ENODEV;
+
+	if (ha->flags.detected_lr_sfp) {
+		ql_log(ql_log_info, base_vha, 0xffff,
+		    "Reset chip to pick up LR SFP setting\n");
+		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+	}
+
+	return 0;
+
+probe_failed:
+	if (base_vha->timer_active)
+		qla2x00_stop_timer(base_vha);
+	base_vha->flags.online = 0;
+	if (ha->dpc_thread) {
+		struct task_struct *t = ha->dpc_thread;
+
+		ha->dpc_thread = NULL;
+		kthread_stop(t);
+	}
+
+	qla2x00_free_device(base_vha);
+	scsi_host_put(base_vha->host);
+	/*
+	 * Need to NULL out local req/rsp after
+	 * qla2x00_free_device => qla2x00_free_queues frees
+	 * what these are pointing to. Or else we'll
+	 * fall over below in qla2x00_free_req/rsp_que.
+	 */
+	req = NULL;
+	rsp = NULL;
+
+probe_hw_failed:
+	qla2x00_mem_free(ha);
+	qla2x00_free_req_que(ha, req);
+	qla2x00_free_rsp_que(ha, rsp);
+	qla2x00_clear_drv_active(ha);
+
+iospace_config_failed:
+	if (IS_P3P_TYPE(ha)) {
+		if (ha->nx_pcibase)
+			iounmap((device_reg_t *)ha->nx_pcibase);
+		if (!ql2xdbwr)
+			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
+	} else {
+		if (ha->iobase)
+			iounmap(ha->iobase);
+		if (ha->cregbase)
+			iounmap(ha->cregbase);
+	}
+	pci_release_selected_regions(ha->pdev, ha->bars);
+	kfree(ha);
+
+disable_device:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static void
+qla2x00_shutdown(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data  *ha;
+
+	vha = pci_get_drvdata(pdev);
+	ha = vha->hw;
+
+	ql_log(ql_log_info, vha, 0xfffa,
+		"Adapter shutdown\n");
+
+	/*
+	 * Prevent future board_disable and wait
+	 * until any pending board_disable has completed.
+	 */
+	set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+	cancel_work_sync(&ha->board_disable);
+
+	if (!atomic_read(&pdev->enable_cnt))
+		return;
+
+	/* Notify ISPFX00 firmware */
+	if (IS_QLAFX00(ha))
+		qlafx00_driver_shutdown(vha, 20);
+
+	/* Turn-off FCE trace */
+	if (ha->flags.fce_enabled) {
+		qla2x00_disable_fce_trace(vha, NULL, NULL);
+		ha->flags.fce_enabled = 0;
+	}
+
+	/* Turn-off EFT trace */
+	if (ha->eft)
+		qla2x00_disable_eft_trace(vha);
+
+	/* Stop currently executing firmware. */
+	qla2x00_try_to_stop_firmware(vha);
+
+	/* Disable timer */
+	if (vha->timer_active)
+		qla2x00_stop_timer(vha);
+
+	/* Turn adapter offline */
+	vha->flags.online = 0;
+
+	/* turn-off interrupts on the card */
+	if (ha->interrupts_on) {
+		vha->flags.init_done = 0;
+		ha->isp_ops->disable_intrs(ha);
+	}
+
+	qla2x00_free_irqs(vha);
+
+	qla2x00_free_fw_dump(ha);
+
+	pci_disable_device(pdev);
+	ql_log(ql_log_info, vha, 0xfffe,
+		"Adapter shutdown successfully.\n");
+}
+
+/* Deletes all the virtual ports for a given ha */
+static void
+qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
+{
+	scsi_qla_host_t *vha;
+	unsigned long flags;
+
+	mutex_lock(&ha->vport_lock);
+	while (ha->cur_vport_count) {
+		spin_lock_irqsave(&ha->vport_slock, flags);
+
+		BUG_ON(base_vha->list.next == &ha->vp_list);
+		/* This assumes first entry in ha->vp_list is always base vha */
+		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
+		scsi_host_get(vha->host);
+
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+		mutex_unlock(&ha->vport_lock);
+
+		fc_vport_terminate(vha->fc_vport);
+		scsi_host_put(vha->host);
+
+		mutex_lock(&ha->vport_lock);
+	}
+	mutex_unlock(&ha->vport_lock);
+}
+
+/* Stops all deferred work threads */
+static void
+qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
+{
+	/* Cancel all work and destroy DPC workqueues */
+	if (ha->dpc_lp_wq) {
+		cancel_work_sync(&ha->idc_aen);
+		destroy_workqueue(ha->dpc_lp_wq);
+		ha->dpc_lp_wq = NULL;
+	}
+
+	if (ha->dpc_hp_wq) {
+		cancel_work_sync(&ha->nic_core_reset);
+		cancel_work_sync(&ha->idc_state_handler);
+		cancel_work_sync(&ha->nic_core_unrecoverable);
+		destroy_workqueue(ha->dpc_hp_wq);
+		ha->dpc_hp_wq = NULL;
+	}
+
+	/* Kill the kernel thread for this host */
+	if (ha->dpc_thread) {
+		struct task_struct *t = ha->dpc_thread;
+
+		/*
+		 * qla2xxx_wake_dpc checks for ->dpc_thread
+		 * so we need to zero it out.
+		 */
+		ha->dpc_thread = NULL;
+		kthread_stop(t);
+	}
+}
+
+static void
+qla2x00_unmap_iobases(struct qla_hw_data *ha)
+{
+	if (IS_QLA82XX(ha)) {
+		iounmap((device_reg_t *)ha->nx_pcibase);
+		if (!ql2xdbwr)
+			iounmap((device_reg_t *)ha->nxdb_wr_ptr);
+	} else {
+		if (ha->iobase)
+			iounmap(ha->iobase);
+
+		if (ha->cregbase)
+			iounmap(ha->cregbase);
+
+		if (ha->mqiobase)
+			iounmap(ha->mqiobase);
+
+		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
+			iounmap(ha->msixbase);
+	}
+}
+
+static void
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
+{
+	if (IS_QLA8044(ha)) {
+		qla8044_idc_lock(ha);
+		qla8044_clear_drv_active(ha);
+		qla8044_idc_unlock(ha);
+	} else if (IS_QLA82XX(ha)) {
+		qla82xx_idc_lock(ha);
+		qla82xx_clear_drv_active(ha);
+		qla82xx_idc_unlock(ha);
+	}
+}
+
+static void
+qla2x00_remove_one(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *base_vha;
+	struct qla_hw_data  *ha;
+
+	base_vha = pci_get_drvdata(pdev);
+	ha = base_vha->hw;
+
+	/* Indicate device removal to prevent future board_disable and wait
+	 * until any pending board_disable has completed. */
+	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+	cancel_work_sync(&ha->board_disable);
+
+	/*
+	 * If the PCI device is disabled then there was a PCI-disconnect and
+	 * qla2x00_disable_board_on_pci_error has taken care of most of the
+	 * resources.
+	 */
+	if (!atomic_read(&pdev->enable_cnt)) {
+		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+		    base_vha->gnl.l, base_vha->gnl.ldma);
+
+		scsi_host_put(base_vha->host);
+		kfree(ha);
+		pci_set_drvdata(pdev, NULL);
+		return;
+	}
+	qla2x00_wait_for_hba_ready(base_vha);
+
+	qla2x00_wait_for_sess_deletion(base_vha);
+
+	/*
+	 * If the UNLOADING flag is already set, the unload is already in
+	 * progress wherever the flag was first set; bail out here.
+	 */
+	if (test_bit(UNLOADING, &base_vha->dpc_flags))
+		return;
+
+	set_bit(UNLOADING, &base_vha->dpc_flags);
+
+	qla_nvme_delete(base_vha);
+
+	dma_free_coherent(&ha->pdev->dev,
+		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+
+	if (IS_QLAFX00(ha))
+		qlafx00_driver_shutdown(base_vha, 20);
+
+	qla2x00_delete_all_vps(ha, base_vha);
+
+	if (IS_QLA8031(ha)) {
+		ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+		    "Clearing fcoe driver presence.\n");
+		if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+			    "Error while clearing DRV-Presence.\n");
+	}
+
+	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+	qla2x00_dfs_remove(base_vha);
+
+	qla84xx_put_chip(base_vha);
+
+	/* Laser should be disabled only for ISP2031 */
+	if (IS_QLA2031(ha))
+		qla83xx_disable_laser(base_vha);
+
+	/* Disable timer */
+	if (base_vha->timer_active)
+		qla2x00_stop_timer(base_vha);
+
+	base_vha->flags.online = 0;
+
+	/* free DMA memory */
+	if (ha->exlogin_buf)
+		qla2x00_free_exlogin_buffer(ha);
+
+	/* free DMA memory */
+	if (ha->exchoffld_buf)
+		qla2x00_free_exchoffld_buffer(ha);
+
+	qla2x00_destroy_deferred_work(ha);
+
+	qlt_remove_target(ha, base_vha);
+
+	qla2x00_free_sysfs_attr(base_vha, true);
+
+	fc_remove_host(base_vha->host);
+	qlt_remove_target_resources(ha);
+
+	scsi_remove_host(base_vha->host);
+
+	qla2x00_free_device(base_vha);
+
+	qla2x00_clear_drv_active(ha);
+
+	scsi_host_put(base_vha->host);
+
+	qla2x00_unmap_iobases(ha);
+
+	pci_release_selected_regions(ha->pdev, ha->bars);
+	kfree(ha);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+static void
+qla2x00_free_device(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+	/* Disable timer */
+	if (vha->timer_active)
+		qla2x00_stop_timer(vha);
+
+	qla25xx_delete_queues(vha);
+
+	if (ha->flags.fce_enabled)
+		qla2x00_disable_fce_trace(vha, NULL, NULL);
+
+	if (ha->eft)
+		qla2x00_disable_eft_trace(vha);
+
+	/* Stop currently executing firmware. */
+	qla2x00_try_to_stop_firmware(vha);
+
+	vha->flags.online = 0;
+
+	/* turn-off interrupts on the card */
+	if (ha->interrupts_on) {
+		vha->flags.init_done = 0;
+		ha->isp_ops->disable_intrs(ha);
+	}
+
+	qla2x00_free_fcports(vha);
+
+	qla2x00_free_irqs(vha);
+
+	/* Flush the work queue and remove it */
+	if (ha->wq) {
+		flush_workqueue(ha->wq);
+		destroy_workqueue(ha->wq);
+		ha->wq = NULL;
+	}
+
+	qla2x00_mem_free(ha);
+
+	qla82xx_md_free(vha);
+
+	qla2x00_free_queues(ha);
+}
+
+void qla2x00_free_fcports(struct scsi_qla_host *vha)
+{
+	fc_port_t *fcport, *tfcport;
+
+	list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
+		list_del(&fcport->list);
+		qla2x00_clear_loop_id(fcport);
+		kfree(fcport);
+	}
+}
+
+static inline void
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
+    int defer)
+{
+	struct fc_rport *rport;
+	scsi_qla_host_t *base_vha;
+	unsigned long flags;
+
+	if (!fcport->rport)
+		return;
+
+	rport = fcport->rport;
+	if (defer) {
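+		/*
+		 * Deferred removal: stash the rport on the fcport and let
+		 * the DPC thread perform the fc_remote_port_delete() as
+		 * part of its FCPORT_UPDATE_NEEDED handling.
+		 */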
+		base_vha = pci_get_drvdata(vha->hw->pdev);
+		spin_lock_irqsave(vha->host->host_lock, flags);
+		fcport->drport = rport;
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
+		qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
+		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+	} else {
+		int now;
+		if (rport) {
+			ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+			    "%s %8phN. rport %p roles %x\n",
+			    __func__, fcport->port_name, rport,
+			    rport->roles);
+			fc_remote_port_delete(rport);
+		}
+		qlt_do_generation_tick(vha, &now);
+	}
+}
+
+/*
+ * qla2x00_mark_device_lost
+ *	Updates fcport state when a device goes offline.
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *	fcport = port structure pointer.
+ *	do_login = schedule a relogin when set.
+ *	defer = defer rport removal to the DPC thread.
+ *
+ * Return:
+ *	None.
+ */
+void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
+    int do_login, int defer)
+{
+	if (IS_QLAFX00(vha->hw)) {
+		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+		qla2x00_schedule_rport_del(vha, fcport, defer);
+		return;
+	}
+
+	if (atomic_read(&fcport->state) == FCS_ONLINE &&
+	    vha->vp_idx == fcport->vha->vp_idx) {
+		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+		qla2x00_schedule_rport_del(vha, fcport, defer);
+	}
+	/*
+	 * We may need to retry the login, so don't change the state of the
+	 * port but do the retries.
+	 */
+	if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
+		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+
+	if (!do_login)
+		return;
+
+	set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+	if (fcport->login_retry == 0) {
+		fcport->login_retry = vha->hw->login_retry_count;
+
+		ql_dbg(ql_dbg_disc, vha, 0x20a3,
+		    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+		    fcport->port_name, fcport->loop_id, fcport->login_retry);
+	}
+}
+
+/*
+ * qla2x00_mark_all_devices_lost
+ *	Updates the state of every fcport on a vport when the adapter
+ *	goes offline.
+ *
+ * Input:
+ *	vha = adapter block pointer.
+ *	defer = defer rport removal to the DPC thread.
+ *
+ * Return:
+ *	None.
+ */
+void
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
+{
+	fc_port_t *fcport;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20f1,
+	    "Mark all dev lost\n");
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		fcport->scan_state = 0;
+		qlt_schedule_sess_for_deletion_lock(fcport);
+
+		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
+			continue;
+
+		/*
+		 * No point in marking the device as lost if it is
+		 * already DEAD.
+		 */
+		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
+			continue;
+		if (atomic_read(&fcport->state) == FCS_ONLINE) {
+			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+			if (defer)
+				qla2x00_schedule_rport_del(vha, fcport, defer);
+			else if (vha->vp_idx == fcport->vha->vp_idx)
+				qla2x00_schedule_rport_del(vha, fcport, defer);
+		}
+	}
+}
+
+/*
+* qla2x00_mem_alloc
+*      Allocates adapter memory.
+*
+* Returns:
+*      0  = success.
+*      !0  = failure.
+*/
+static int
+qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+	struct req_que **req, struct rsp_que **rsp)
+{
+	char	name[16];
+
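+	/*
+	 * Allocations below unwind through the fail_* labels at the end
+	 * of this function: on any failure we jump to the label that
+	 * frees everything allocated up to that point.
+	 */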
+	ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
+		&ha->init_cb_dma, GFP_KERNEL);
+	if (!ha->init_cb)
+		goto fail;
+
+	if (qlt_mem_alloc(ha) < 0)
+		goto fail_free_init_cb;
+
+	ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
+		qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
+	if (!ha->gid_list)
+		goto fail_free_tgt_mem;
+
+	ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+	if (!ha->srb_mempool)
+		goto fail_free_gid_list;
+
+	if (IS_P3P_TYPE(ha)) {
+		/* Allocate cache for CT6 Ctx. */
+		if (!ctx_cachep) {
+			ctx_cachep = kmem_cache_create("qla2xxx_ctx",
+				sizeof(struct ct6_dsd), 0,
+				SLAB_HWCACHE_ALIGN, NULL);
+			if (!ctx_cachep)
+				goto fail_free_srb_mempool;
+		}
+		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+			ctx_cachep);
+		if (!ha->ctx_mempool)
+			goto fail_free_srb_mempool;
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
+		    "ctx_cachep=%p ctx_mempool=%p.\n",
+		    ctx_cachep, ha->ctx_mempool);
+	}
+
+	/* Get memory for cached NVRAM */
+	ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
+	if (!ha->nvram)
+		goto fail_free_ctx_mempool;
+
+	snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
+		ha->pdev->device);
+	ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+		DMA_POOL_SIZE, 8, 0);
+	if (!ha->s_dma_pool)
+		goto fail_free_nvram;
+
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
+	    "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
+	    ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
+
+	if (IS_P3P_TYPE(ha) || ql2xenabledif) {
+		ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+			DSD_LIST_DMA_POOL_SIZE, 8, 0);
+		if (!ha->dl_dma_pool) {
+			ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
+			    "Failed to allocate memory for dl_dma_pool.\n");
+			goto fail_s_dma_pool;
+		}
+
+		ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+			FCP_CMND_DMA_POOL_SIZE, 8, 0);
+		if (!ha->fcp_cmnd_dma_pool) {
+			ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
+			    "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
+			goto fail_dl_dma_pool;
+		}
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
+		    "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
+		    ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
+	}
+
+	/* Allocate memory for SNS commands */
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		/* Get consistent memory allocated for SNS commands */
+		ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
+		sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
+		if (!ha->sns_cmd)
+			goto fail_dma_pool;
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
+		    "sns_cmd: %p.\n", ha->sns_cmd);
+	} else {
+		/* Get consistent memory allocated for MS IOCB */
+		ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+			&ha->ms_iocb_dma);
+		if (!ha->ms_iocb)
+			goto fail_dma_pool;
+		/* Get consistent memory allocated for CT SNS commands */
+		ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
+			sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
+		if (!ha->ct_sns)
+			goto fail_free_ms_iocb;
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
+		    "ms_iocb=%p ct_sns=%p.\n",
+		    ha->ms_iocb, ha->ct_sns);
+	}
+
+	/* Allocate memory for request ring */
+	*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+	if (!*req) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
+		    "Failed to allocate memory for req.\n");
+		goto fail_req;
+	}
+	(*req)->length = req_len;
+	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
+		((*req)->length + 1) * sizeof(request_t),
+		&(*req)->dma, GFP_KERNEL);
+	if (!(*req)->ring) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
+		    "Failed to allocate memory for req_ring.\n");
+		goto fail_req_ring;
+	}
+	/* Allocate memory for response ring */
+	*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+	if (!*rsp) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
+		    "Failed to allocate memory for rsp.\n");
+		goto fail_rsp;
+	}
+	(*rsp)->hw = ha;
+	(*rsp)->length = rsp_len;
+	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
+		((*rsp)->length + 1) * sizeof(response_t),
+		&(*rsp)->dma, GFP_KERNEL);
+	if (!(*rsp)->ring) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
+		    "Failed to allocate memory for rsp_ring.\n");
+		goto fail_rsp_ring;
+	}
+	(*req)->rsp = *rsp;
+	(*rsp)->req = *req;
+	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
+	    "req=%p req->length=%d req->ring=%p rsp=%p "
+	    "rsp->length=%d rsp->ring=%p.\n",
+	    *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
+	    (*rsp)->ring);
+	/* Allocate memory for NVRAM data for vports */
+	if (ha->nvram_npiv_size) {
+		ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
+		    ha->nvram_npiv_size, GFP_KERNEL);
+		if (!ha->npiv_info) {
+			ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
+			    "Failed to allocate memory for npiv_info.\n");
+			goto fail_npiv_info;
+		}
+	} else
+		ha->npiv_info = NULL;
+
+	/* Get consistent memory allocated for EX-INIT-CB. */
+	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+		    &ha->ex_init_cb_dma);
+		if (!ha->ex_init_cb)
+			goto fail_ex_init_cb;
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
+		    "ex_init_cb=%p.\n", ha->ex_init_cb);
+	}
+
+	INIT_LIST_HEAD(&ha->gbl_dsd_list);
+
+	/* Get consistent memory allocated for Async Port-Database. */
+	if (!IS_FWI2_CAPABLE(ha)) {
+		ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+			&ha->async_pd_dma);
+		if (!ha->async_pd)
+			goto fail_async_pd;
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
+		    "async_pd=%p.\n", ha->async_pd);
+	}
+
+	INIT_LIST_HEAD(&ha->vp_list);
+
+	/* Allocate memory for our loop_id bitmap */
+	ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+	    GFP_KERNEL);
+	if (!ha->loop_id_map)
+		goto fail_loop_id_map;
+	else {
+		qla2x00_set_reserved_loop_ids(ha);
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+		    "loop_id_map=%p.\n", ha->loop_id_map);
+	}
+
+	ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
+	    SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
+	if (!ha->sfp_data) {
+		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
+		    "Unable to allocate memory for SFP read-data.\n");
+		goto fail_sfp_data;
+	}
+
+	return 0;
+
+fail_sfp_data:
+	kfree(ha->loop_id_map);
+fail_loop_id_map:
+	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+fail_async_pd:
+	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+fail_ex_init_cb:
+	kfree(ha->npiv_info);
+fail_npiv_info:
+	dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
+		sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
+	(*rsp)->ring = NULL;
+	(*rsp)->dma = 0;
+fail_rsp_ring:
+	kfree(*rsp);
+	*rsp = NULL;
+fail_rsp:
+	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
+		sizeof(request_t), (*req)->ring, (*req)->dma);
+	(*req)->ring = NULL;
+	(*req)->dma = 0;
+fail_req_ring:
+	kfree(*req);
+	*req = NULL;
+fail_req:
+	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+		ha->ct_sns, ha->ct_sns_dma);
+	ha->ct_sns = NULL;
+	ha->ct_sns_dma = 0;
+fail_free_ms_iocb:
+	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+	ha->ms_iocb = NULL;
+	ha->ms_iocb_dma = 0;
+
+	if (ha->sns_cmd)
+		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+		    ha->sns_cmd, ha->sns_cmd_dma);
+fail_dma_pool:
+	if (IS_QLA82XX(ha) || ql2xenabledif) {
+		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+		ha->fcp_cmnd_dma_pool = NULL;
+	}
+fail_dl_dma_pool:
+	if (IS_QLA82XX(ha) || ql2xenabledif) {
+		dma_pool_destroy(ha->dl_dma_pool);
+		ha->dl_dma_pool = NULL;
+	}
+fail_s_dma_pool:
+	dma_pool_destroy(ha->s_dma_pool);
+	ha->s_dma_pool = NULL;
+fail_free_nvram:
+	kfree(ha->nvram);
+	ha->nvram = NULL;
+fail_free_ctx_mempool:
+	if (ha->ctx_mempool)
+		mempool_destroy(ha->ctx_mempool);
+	ha->ctx_mempool = NULL;
+fail_free_srb_mempool:
+	if (ha->srb_mempool)
+		mempool_destroy(ha->srb_mempool);
+	ha->srb_mempool = NULL;
+fail_free_gid_list:
+	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	ha->gid_list,
+	ha->gid_list_dma);
+	ha->gid_list = NULL;
+	ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+	qlt_mem_free(ha);
+fail_free_init_cb:
+	dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
+	ha->init_cb_dma);
+	ha->init_cb = NULL;
+	ha->init_cb_dma = 0;
+fail:
+	ql_log(ql_log_fatal, NULL, 0x0030,
+	    "Memory allocation failure.\n");
+	return -ENOMEM;
+}
+
+int
+qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
+{
+	int rval;
+	uint16_t	size, max_cnt, temp;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Return if we don't need to allocate any extended logins */
+	if (!ql2xexlogins)
+		return QLA_SUCCESS;
+
+	if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
+		return QLA_SUCCESS;
+
+	ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
+	max_cnt = 0;
+	rval = qla_get_exlogin_status(vha, &size, &max_cnt);
+	if (rval != QLA_SUCCESS) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
+		    "Failed to get exlogin status.\n");
+		return rval;
+	}
+
+	temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
+	temp *= size;
+
+	if (temp != ha->exlogin_size) {
+		qla2x00_free_exlogin_buffer(ha);
+		ha->exlogin_size = temp;
+
+		ql_log(ql_log_info, vha, 0xd024,
+		    "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
+		    max_cnt, size, temp);
+
+		ql_log(ql_log_info, vha, 0xd025,
+		    "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
+
+		/* Get consistent memory for extended logins */
+		ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
+			ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
+		if (!ha->exlogin_buf) {
+			ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
+			    "Failed to allocate memory for exlogin_buf_dma.\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* Now configure the dma buffer */
+	rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
+	if (rval) {
+		ql_log(ql_log_fatal, vha, 0xd033,
+		    "Setup extended login buffer ****FAILED****.\n");
+		qla2x00_free_exlogin_buffer(ha);
+	}
+
+	return rval;
+}
+
+/*
+* qla2x00_free_exlogin_buffer
+*
+* Input:
+*	ha = adapter block pointer
+*/
+void
+qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
+{
+	if (ha->exlogin_buf) {
+		dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
+		    ha->exlogin_buf, ha->exlogin_buf_dma);
+		ha->exlogin_buf = NULL;
+		ha->exlogin_size = 0;
+	}
+}
+
+static void
+qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
+{
+	u32 temp;
+	*ret_cnt = FW_DEF_EXCHANGES_CNT;
+
+	if (qla_ini_mode_enabled(vha)) {
+		if (ql2xiniexchg > max_cnt)
+			ql2xiniexchg = max_cnt;
+
+		if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+			*ret_cnt = ql2xiniexchg;
+	} else if (qla_tgt_mode_enabled(vha)) {
+		if (ql2xexchoffld > max_cnt)
+			ql2xexchoffld = max_cnt;
+
+		if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+			*ret_cnt = ql2xexchoffld;
+	} else if (qla_dual_mode_enabled(vha)) {
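+		/*
+		 * Dual mode: if the combined initiator + target request
+		 * exceeds the firmware maximum, trim the overage roughly
+		 * in half from each share.
+		 */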
+		temp = ql2xiniexchg + ql2xexchoffld;
+		if (temp > max_cnt) {
+			ql2xiniexchg -= (temp - max_cnt)/2;
+			ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+			temp = max_cnt;
+		}
+
+		if (temp > FW_DEF_EXCHANGES_CNT)
+			*ret_cnt = temp;
+	}
+}
+
+int
+qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
+{
+	int rval;
+	u16 size, max_cnt;
+	u32 temp;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->flags.exchoffld_enabled)
+		return QLA_SUCCESS;
+
+	if (!IS_EXCHG_OFFLD_CAPABLE(ha))
+		return QLA_SUCCESS;
+
+	max_cnt = 0;
+	rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
+	if (rval != QLA_SUCCESS) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
+		    "Failed to get exchange offload status.\n");
+		return rval;
+	}
+
+	qla2x00_number_of_exch(vha, &temp, max_cnt);
+	temp *= size;
+
+	if (temp != ha->exchoffld_size) {
+		qla2x00_free_exchoffld_buffer(ha);
+		ha->exchoffld_size = temp;
+
+		ql_log(ql_log_info, vha, 0xd016,
+		    "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
+		    max_cnt, size, temp);
+
+		ql_log(ql_log_info, vha, 0xd017,
+		    "Exchange Buffers requested size = 0x%x\n",
+		    ha->exchoffld_size);
+
+		/* Get consistent memory for exchange offload */
+		ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
+			ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
+		if (!ha->exchoffld_buf) {
+			ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+			    "Failed to allocate memory for exchoffld_buf_dma.\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* Now configure the dma buffer */
+	rval = qla_set_exchoffld_mem_cfg(vha);
+	if (rval) {
+		ql_log(ql_log_fatal, vha, 0xd02e,
+		    "Setup exchange offload buffer ****FAILED****.\n");
+		qla2x00_free_exchoffld_buffer(ha);
+	} else {
+		/* re-adjust the number of target exchanges */
+		struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
+
+		if (qla_ini_mode_enabled(vha))
+			icb->exchange_count = 0;
+		else
+			icb->exchange_count = cpu_to_le16(ql2xexchoffld);
+	}
+
+	return rval;
+}
+
+/*
+* qla2x00_free_exchoffld_buffer
+*
+* Input:
+*	ha = adapter block pointer
+*/
+void
+qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
+{
+	if (ha->exchoffld_buf) {
+		dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
+		    ha->exchoffld_buf, ha->exchoffld_buf_dma);
+		ha->exchoffld_buf = NULL;
+		ha->exchoffld_size = 0;
+	}
+}
+
+/*
+* qla2x00_free_fw_dump
+*	Frees firmware dump resources (FCE, EFT, dump buffer and template).
+*
+* Input:
+*	ha = adapter block pointer
+*/
+static void
+qla2x00_free_fw_dump(struct qla_hw_data *ha)
+{
+	if (ha->fce)
+		dma_free_coherent(&ha->pdev->dev,
+		    FCE_SIZE, ha->fce, ha->fce_dma);
+
+	if (ha->eft)
+		dma_free_coherent(&ha->pdev->dev,
+		    EFT_SIZE, ha->eft, ha->eft_dma);
+
+	if (ha->fw_dump)
+		vfree(ha->fw_dump);
+	if (ha->fw_dump_template)
+		vfree(ha->fw_dump_template);
+
+	ha->fce = NULL;
+	ha->fce_dma = 0;
+	ha->eft = NULL;
+	ha->eft_dma = 0;
+	ha->fw_dumped = 0;
+	ha->fw_dump_cap_flags = 0;
+	ha->fw_dump_reading = 0;
+	ha->fw_dump = NULL;
+	ha->fw_dump_len = 0;
+	ha->fw_dump_template = NULL;
+	ha->fw_dump_template_len = 0;
+}
+
+/*
+* qla2x00_mem_free
+*      Frees all adapter allocated memory.
+*
+* Input:
+*      ha = adapter block pointer.
+*/
+static void
+qla2x00_mem_free(struct qla_hw_data *ha)
+{
+	qla2x00_free_fw_dump(ha);
+
+	if (ha->mctp_dump)
+		dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
+		    ha->mctp_dump_dma);
+
+	if (ha->srb_mempool)
+		mempool_destroy(ha->srb_mempool);
+
+	if (ha->dcbx_tlv)
+		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+		    ha->dcbx_tlv, ha->dcbx_tlv_dma);
+
+	if (ha->xgmac_data)
+		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+		    ha->xgmac_data, ha->xgmac_data_dma);
+
+	if (ha->sns_cmd)
+		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+		ha->sns_cmd, ha->sns_cmd_dma);
+
+	if (ha->ct_sns)
+		dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+		ha->ct_sns, ha->ct_sns_dma);
+
+	if (ha->sfp_data)
+		dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
+		    ha->sfp_data_dma);
+
+	if (ha->ms_iocb)
+		dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+
+	if (ha->ex_init_cb)
+		dma_pool_free(ha->s_dma_pool,
+			ha->ex_init_cb, ha->ex_init_cb_dma);
+
+	if (ha->async_pd)
+		dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+
+	if (ha->s_dma_pool)
+		dma_pool_destroy(ha->s_dma_pool);
+
+	if (ha->gid_list)
+		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+		ha->gid_list, ha->gid_list_dma);
+
+	if (IS_QLA82XX(ha)) {
+		if (!list_empty(&ha->gbl_dsd_list)) {
+			struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+			/* clean up allocated prev pool */
+			list_for_each_entry_safe(dsd_ptr,
+				tdsd_ptr, &ha->gbl_dsd_list, list) {
+				dma_pool_free(ha->dl_dma_pool,
+				dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
+				list_del(&dsd_ptr->list);
+				kfree(dsd_ptr);
+			}
+		}
+	}
+
+	if (ha->dl_dma_pool)
+		dma_pool_destroy(ha->dl_dma_pool);
+
+	if (ha->fcp_cmnd_dma_pool)
+		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+
+	if (ha->ctx_mempool)
+		mempool_destroy(ha->ctx_mempool);
+
+	qlt_mem_free(ha);
+
+	if (ha->init_cb)
+		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+			ha->init_cb, ha->init_cb_dma);
+
+	vfree(ha->optrom_buffer);
+	kfree(ha->nvram);
+	kfree(ha->npiv_info);
+	kfree(ha->swl);
+	kfree(ha->loop_id_map);
+
+	ha->srb_mempool = NULL;
+	ha->ctx_mempool = NULL;
+	ha->sns_cmd = NULL;
+	ha->sns_cmd_dma = 0;
+	ha->ct_sns = NULL;
+	ha->ct_sns_dma = 0;
+	ha->ms_iocb = NULL;
+	ha->ms_iocb_dma = 0;
+	ha->init_cb = NULL;
+	ha->init_cb_dma = 0;
+	ha->ex_init_cb = NULL;
+	ha->ex_init_cb_dma = 0;
+	ha->async_pd = NULL;
+	ha->async_pd_dma = 0;
+	ha->loop_id_map = NULL;
+	ha->npiv_info = NULL;
+	ha->optrom_buffer = NULL;
+	ha->swl = NULL;
+	ha->nvram = NULL;
+	ha->mctp_dump = NULL;
+	ha->dcbx_tlv = NULL;
+	ha->xgmac_data = NULL;
+	ha->sfp_data = NULL;
+
+	ha->s_dma_pool = NULL;
+	ha->dl_dma_pool = NULL;
+	ha->fcp_cmnd_dma_pool = NULL;
+
+	ha->gid_list = NULL;
+	ha->gid_list_dma = 0;
+
+	ha->tgt.atio_ring = NULL;
+	ha->tgt.atio_dma = 0;
+	ha->tgt.tgt_vp_map = NULL;
+}
+
+struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+						struct qla_hw_data *ha)
+{
+	struct Scsi_Host *host;
+	struct scsi_qla_host *vha = NULL;
+
+	host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
+	if (!host) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
+		    "Failed to allocate host from the scsi layer, aborting.\n");
+		return NULL;
+	}
+
+	/* Clear our data area */
+	vha = shost_priv(host);
+	memset(vha, 0, sizeof(scsi_qla_host_t));
+
+	vha->host = host;
+	vha->host_no = host->host_no;
+	vha->hw = ha;
+
+	INIT_LIST_HEAD(&vha->vp_fcports);
+	INIT_LIST_HEAD(&vha->work_list);
+	INIT_LIST_HEAD(&vha->list);
+	INIT_LIST_HEAD(&vha->qla_cmd_list);
+	INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
+	INIT_LIST_HEAD(&vha->logo_list);
+	INIT_LIST_HEAD(&vha->plogi_ack_list);
+	INIT_LIST_HEAD(&vha->qp_list);
+	INIT_LIST_HEAD(&vha->gnl.fcports);
+	INIT_LIST_HEAD(&vha->nvme_rport_list);
+	INIT_LIST_HEAD(&vha->gpnid_list);
+
+	spin_lock_init(&vha->work_lock);
+	spin_lock_init(&vha->cmd_list_lock);
+	init_waitqueue_head(&vha->fcport_waitQ);
+	init_waitqueue_head(&vha->vref_waitq);
+
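+	/* Get Name List (gnl) buffer: one extended entry per possible loop ID. */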
+	vha->gnl.size = sizeof(struct get_name_list_extended) *
+			(ha->max_loop_id + 1);
+	vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
+	    vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
+	if (!vha->gnl.l) {
+		ql_log(ql_log_fatal, vha, 0xd04a,
+		    "Alloc failed for name list.\n");
+		scsi_remove_host(vha->host);
+		return NULL;
+	}
+
+	sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+	ql_dbg(ql_dbg_init, vha, 0x0041,
+	    "Allocated the host=%p hw=%p vha=%p dev_name=%s\n",
+	    vha->host, vha->hw, vha,
+	    dev_name(&(ha->pdev->dev)));
+
+	return vha;
+}
+
+struct qla_work_evt *
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
+{
+	struct qla_work_evt *e;
+	uint8_t bail;
+
+	QLA_VHA_MARK_BUSY(vha, bail);
+	if (bail)
+		return NULL;
+
+	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
+	if (!e) {
+		QLA_VHA_MARK_NOT_BUSY(vha);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&e->list);
+	e->type = type;
+	e->flags = QLA_EVT_FLAG_FREE;
+	return e;
+}
+
+int
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->work_lock, flags);
+	list_add_tail(&e->list, &vha->work_list);
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+
+	if (QLA_EARLY_LINKUP(vha->hw))
+		schedule_work(&vha->iocb_work);
+	else
+		qla2xxx_wake_dpc(vha);
+
+	return QLA_SUCCESS;
+}
+
+int
+qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
+    u32 data)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.aen.code = code;
+	e->u.aen.data = data;
+	return qla2x00_post_work(vha, e);
+}
+
+int
+qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
+	return qla2x00_post_work(vha, e);
+}
+
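+/*
+ * Template for the qla2x00_post_async_<name>_work() helpers below:
+ * each queues a logio work event of the given type, which the DPC
+ * thread later dispatches in qla2x00_do_work().
+ */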
+#define qla2x00_post_async_work(name, type)	\
+int qla2x00_post_async_##name##_work(		\
+    struct scsi_qla_host *vha,			\
+    fc_port_t *fcport, uint16_t *data)		\
+{						\
+	struct qla_work_evt *e;			\
+						\
+	e = qla2x00_alloc_work(vha, type);	\
+	if (!e)					\
+		return QLA_FUNCTION_FAILED;	\
+						\
+	e->u.logio.fcport = fcport;		\
+	if (data) {				\
+		e->u.logio.data[0] = data[0];	\
+		e->u.logio.data[1] = data[1];	\
+	}					\
+	return qla2x00_post_work(vha, e);	\
+}
+
+qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
+qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
+qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
+qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
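+
+/*
+ * Usage sketch (hypothetical caller): posting an async login for a
+ * fabric fcport from process context; on failure the relogin is left
+ * to the DPC RELOGIN_NEEDED handling:
+ *
+ *	uint16_t data[2] = { 0, 0 };
+ *
+ *	if (qla2x00_post_async_login_work(vha, fcport, data) !=
+ *	    QLA_SUCCESS)
+ *		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ */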
+
+int
+qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.uevent.code = code;
+	return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
+{
+	char event_string[40];
+	char *envp[] = { event_string, NULL };
+
+	switch (code) {
+	case QLA_UEVENT_CODE_FW_DUMP:
+		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+		    vha->host_no);
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
+int
+qlafx00_post_aenfx_work(struct scsi_qla_host *vha,  uint32_t evtcode,
+			uint32_t *data, int cnt)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.aenfx.evtcode = evtcode;
+	e->u.aenfx.count = cnt;
+	memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
+	return qla2x00_post_work(vha, e);
+}
+
+int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.fcport.fcport = fcport;
+	return qla2x00_post_work(vha, e);
+}
+
+static
+void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+	unsigned long flags;
+	fc_port_t *fcport =  NULL, *tfcp;
+	struct qlt_plogi_ack_t *pla =
+	    (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
+	uint8_t free_fcport = 0;
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
+	if (fcport) {
+		fcport->d_id = e->u.new_sess.id;
+		if (pla) {
+			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+			qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
+			/* we took an extra ref_count to prevent PLOGI ACK when
+			 * fcport/sess has not been created.
+			 */
+			pla->ref_count--;
+		}
+	} else {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (fcport) {
+			fcport->d_id = e->u.new_sess.id;
+			fcport->scan_state = QLA_FCPORT_FOUND;
+			fcport->flags |= FCF_FABRIC_DEVICE;
+			fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+
+			memcpy(fcport->port_name, e->u.new_sess.port_name,
+			    WWN_SIZE);
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+				   "%s %8phC mem alloc fail.\n",
+				   __func__, e->u.new_sess.port_name);
+
+			if (pla)
+				kmem_cache_free(qla_tgt_plogi_cachep, pla);
+			return;
+		}
+
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+		/* search again to make sure no one else got ahead of us */
+		tfcp = qla2x00_find_fcport_by_wwpn(vha,
+		    e->u.new_sess.port_name, 1);
+		if (tfcp) {
+			/* should rarely happen */
+			ql_dbg(ql_dbg_disc, vha, 0xffff,
+			    "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
+			    __func__, tfcp->port_name, tfcp->disc_state,
+			    tfcp->fw_login_state);
+
+			free_fcport = 1;
+		} else {
+			list_add_tail(&fcport->list, &vha->vp_fcports);
+		}
+		if (pla) {
+			qlt_plogi_ack_link(vha, pla, fcport,
+			    QLT_PLOGI_LINK_SAME_WWN);
+			pla->ref_count--;
+		}
+	}
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	if (fcport) {
+		if (pla)
+			qlt_plogi_ack_unref(vha, pla);
+		else
+			qla24xx_async_gnl(vha, fcport);
+	}
+
+	if (free_fcport) {
+		qla2x00_free_fcport(fcport);
+		if (pla)
+			kmem_cache_free(qla_tgt_plogi_cachep, pla);
+	}
+}
+
+void
+qla2x00_do_work(struct scsi_qla_host *vha)
+{
+	struct qla_work_evt *e, *tmp;
+	unsigned long flags;
+	LIST_HEAD(work);
+
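+	/*
+	 * Splice all pending events onto a private list under the lock,
+	 * then dispatch them without holding it.
+	 */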
+	spin_lock_irqsave(&vha->work_lock, flags);
+	list_splice_init(&vha->work_list, &work);
+	spin_unlock_irqrestore(&vha->work_lock, flags);
+
+	list_for_each_entry_safe(e, tmp, &work, list) {
+		list_del_init(&e->list);
+
+		switch (e->type) {
+		case QLA_EVT_AEN:
+			fc_host_post_event(vha->host, fc_get_event_number(),
+			    e->u.aen.code, e->u.aen.data);
+			break;
+		case QLA_EVT_IDC_ACK:
+			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
+			break;
+		case QLA_EVT_ASYNC_LOGIN:
+			qla2x00_async_login(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_LOGOUT:
+			qla2x00_async_logout(vha, e->u.logio.fcport);
+			break;
+		case QLA_EVT_ASYNC_LOGOUT_DONE:
+			qla2x00_async_logout_done(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_ADISC:
+			qla2x00_async_adisc(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_ADISC_DONE:
+			qla2x00_async_adisc_done(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_UEVENT:
+			qla2x00_uevent_emit(vha, e->u.uevent.code);
+			break;
+		case QLA_EVT_AENFX:
+			qlafx00_process_aen(vha, e);
+			break;
+		case QLA_EVT_GIDPN:
+			qla24xx_async_gidpn(vha, e->u.fcport.fcport);
+			break;
+		case QLA_EVT_GPNID:
+			qla24xx_async_gpnid(vha, &e->u.gpnid.id);
+			break;
+		case QLA_EVT_GPNID_DONE:
+			qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+			break;
+		case QLA_EVT_NEW_SESS:
+			qla24xx_create_new_sess(vha, e);
+			break;
+		case QLA_EVT_GPDB:
+			qla24xx_async_gpdb(vha, e->u.fcport.fcport,
+			    e->u.fcport.opt);
+			break;
+		case QLA_EVT_PRLI:
+			qla24xx_async_prli(vha, e->u.fcport.fcport);
+			break;
+		case QLA_EVT_GPSC:
+			qla24xx_async_gpsc(vha, e->u.fcport.fcport);
+			break;
+		case QLA_EVT_UPD_FCPORT:
+			qla2x00_update_fcport(vha, e->u.fcport.fcport);
+			break;
+		case QLA_EVT_GNL:
+			qla24xx_async_gnl(vha, e->u.fcport.fcport);
+			break;
+		case QLA_EVT_NACK:
+			qla24xx_do_nack_work(vha, e);
+			break;
+		}
+		if (e->flags & QLA_EVT_FLAG_FREE)
+			kfree(e);
+
+		/* For each work completed decrement vha ref count */
+		QLA_VHA_MARK_NOT_BUSY(vha);
+	}
+}
+
+/* Relogin to all the fcports of a vport.
+ * Context: dpc thread
+ */
+void qla2x00_relogin(struct scsi_qla_host *vha)
+{
+	fc_port_t       *fcport;
+	int status;
+	struct event_arg ea;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		/*
+		 * If the port is not ONLINE then try to log in
+		 * to it if we haven't run out of retries.
+		 */
+		if (atomic_read(&fcport->state) != FCS_ONLINE &&
+		    fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
+
+			if (fcport->flags & FCF_FABRIC_DEVICE) {
+				ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
+				    "%s %8phC DS %d LS %d\n", __func__,
+				    fcport->port_name, fcport->disc_state,
+				    fcport->fw_login_state);
+				memset(&ea, 0, sizeof(ea));
+				ea.event = FCME_RELOGIN;
+				ea.fcport = fcport;
+				qla2x00_fcport_event_handler(vha, &ea);
+			} else {
+				fcport->login_retry--;
+				status = qla2x00_local_device_login(vha,
+								fcport);
+				if (status == QLA_SUCCESS) {
+					fcport->old_loop_id = fcport->loop_id;
+					ql_dbg(ql_dbg_disc, vha, 0x2003,
+					    "Port login OK: logged in ID 0x%x.\n",
+					    fcport->loop_id);
+					qla2x00_update_fcport(vha, fcport);
+				} else if (status == 1) {
+					set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+					/* retry the login again */
+					ql_dbg(ql_dbg_disc, vha, 0x2007,
+					    "Retrying %d login again loop_id 0x%x.\n",
+					    fcport->login_retry,
+					    fcport->loop_id);
+				} else {
+					fcport->login_retry = 0;
+				}
+
+				if (fcport->login_retry == 0 &&
+				    status != QLA_SUCCESS)
+					qla2x00_clear_loop_id(fcport);
+			}
+		}
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
+	}
+}
+
+/* Schedule work on any of the dpc-workqueues */
+void
+qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
+{
+	struct qla_hw_data *ha = base_vha->hw;
+
+	switch (work_code) {
+	case MBA_IDC_AEN: /* 0x8200 */
+		if (ha->dpc_lp_wq)
+			queue_work(ha->dpc_lp_wq, &ha->idc_aen);
+		break;
+
+	case QLA83XX_NIC_CORE_RESET: /* 0x1 */
+		if (!ha->flags.nic_core_reset_hdlr_active) {
+			if (ha->dpc_hp_wq)
+				queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
+		} else
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
+			    "NIC Core reset is already active. Skip "
+			    "scheduling it again.\n");
+		break;
+	case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
+		if (ha->dpc_hp_wq)
+			queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
+		break;
+	case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
+		if (ha->dpc_hp_wq)
+			queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
+		break;
+	default:
+		ql_log(ql_log_warn, base_vha, 0xb05f,
+		    "Unknown work-code=0x%x.\n", work_code);
+	}
+
+	return;
+}
+
+/* Work: Perform NIC Core Unrecoverable state handling */
+void
+qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
+{
+	struct qla_hw_data *ha =
+		container_of(work, struct qla_hw_data, nic_core_unrecoverable);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	uint32_t dev_state = 0;
+
+	qla83xx_idc_lock(base_vha, 0);
+	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+	qla83xx_reset_ownership(base_vha);
+	if (ha->flags.nic_core_reset_owner) {
+		ha->flags.nic_core_reset_owner = 0;
+		qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+		    QLA8XXX_DEV_FAILED);
+		ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
+		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+	}
+	qla83xx_idc_unlock(base_vha, 0);
+}
+
+/* Work: Execute IDC state handler */
+void
+qla83xx_idc_state_handler_work(struct work_struct *work)
+{
+	struct qla_hw_data *ha =
+		container_of(work, struct qla_hw_data, idc_state_handler);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	uint32_t dev_state = 0;
+
+	qla83xx_idc_lock(base_vha, 0);
+	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+	if (dev_state == QLA8XXX_DEV_FAILED ||
+			dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
+		qla83xx_idc_state_handler(base_vha);
+	qla83xx_idc_unlock(base_vha, 0);
+}
+
+static int
+qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
+{
+	int rval = QLA_SUCCESS;
+	unsigned long heart_beat_wait = jiffies + (1 * HZ);
+	uint32_t heart_beat_counter1, heart_beat_counter2;
+
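+	/*
+	 * Sample the firmware heartbeat register twice, ~100ms apart, for
+	 * up to one second; the NIC core f/w is alive once the counter is
+	 * seen to advance.
+	 */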
+	do {
+		if (time_after(jiffies, heart_beat_wait)) {
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
+			    "Nic Core f/w is not alive.\n");
+			rval = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		qla83xx_idc_lock(base_vha, 0);
+		qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+		    &heart_beat_counter1);
+		qla83xx_idc_unlock(base_vha, 0);
+		msleep(100);
+		qla83xx_idc_lock(base_vha, 0);
+		qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
+		    &heart_beat_counter2);
+		qla83xx_idc_unlock(base_vha, 0);
+	} while (heart_beat_counter1 == heart_beat_counter2);
+
+	return rval;
+}
+
+/* Work: Perform NIC Core Reset handling */
+void
+qla83xx_nic_core_reset_work(struct work_struct *work)
+{
+	struct qla_hw_data *ha =
+		container_of(work, struct qla_hw_data, nic_core_reset);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	uint32_t dev_state = 0;
+
+	if (IS_QLA2031(ha)) {
+		if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
+			ql_log(ql_log_warn, base_vha, 0xb081,
+			    "Failed to dump mctp\n");
+		return;
+	}
+
+	if (!ha->flags.nic_core_reset_hdlr_active) {
+		if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
+			qla83xx_idc_lock(base_vha, 0);
+			qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+			    &dev_state);
+			qla83xx_idc_unlock(base_vha, 0);
+			if (dev_state != QLA8XXX_DEV_NEED_RESET) {
+				ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
+				    "Nic Core f/w is alive.\n");
+				return;
+			}
+		}
+
+		ha->flags.nic_core_reset_hdlr_active = 1;
+		if (qla83xx_nic_core_reset(base_vha)) {
+			/* NIC Core reset failed. */
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
+			    "NIC Core reset failed.\n");
+		}
+		ha->flags.nic_core_reset_hdlr_active = 0;
+	}
+}
+
+/* Work: Handle 8200 IDC aens */
+void
+qla83xx_service_idc_aen(struct work_struct *work)
+{
+	struct qla_hw_data *ha =
+		container_of(work, struct qla_hw_data, idc_aen);
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	uint32_t dev_state, idc_control;
+
+	qla83xx_idc_lock(base_vha, 0);
+	qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+	qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
+	qla83xx_idc_unlock(base_vha, 0);
+	if (dev_state == QLA8XXX_DEV_NEED_RESET) {
+		if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
+			    "Application requested NIC Core Reset.\n");
+			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+		} else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
+		    QLA_SUCCESS) {
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
+			    "Other protocol driver requested NIC Core Reset.\n");
+			qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
+		}
+	} else if (dev_state == QLA8XXX_DEV_FAILED ||
+			dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+		qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
+	}
+}
+
+static void
+qla83xx_wait_logic(void)
+{
+	int i;
+
+	/* Yield CPU */
+	if (!in_interrupt()) {
+		/*
+		 * Wait about 100ms before retrying again; this controls
+		 * the number of retries for a single lock operation.
+		 */
+		msleep(100);
+		schedule();
+	} else {
+		for (i = 0; i < 20; i++)
+			cpu_relax(); /* This is a nop instr on i386 */
+	}
+}
+
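+/*
+ * Force recovery of a stuck IDC lock through the LOCK_RECOVERY
+ * register: bits [1:0] carry the recovery stage and bits [5:2] the
+ * port number of the recovering function (the 0x3/0x3c masks below).
+ * If another function already has a recovery in progress, return
+ * success so the caller simply retries the normal lock path.
+ */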
+static int
+qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
+{
+	int rval;
+	uint32_t data;
+	uint32_t idc_lck_rcvry_stage_mask = 0x3;
+	uint32_t idc_lck_rcvry_owner_mask = 0x3c;
+	struct qla_hw_data *ha = base_vha->hw;
+	ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+	    "Trying force recovery of the IDC lock.\n");
+
+	rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
+	if (rval)
+		return rval;
+
+	if ((data & idc_lck_rcvry_stage_mask) > 0) {
+		return QLA_SUCCESS;
+	} else {
+		data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
+		rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+		    data);
+		if (rval)
+			return rval;
+
+		msleep(200);
+
+		rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
+		    &data);
+		if (rval)
+			return rval;
+
+		if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
+			data &= (IDC_LOCK_RECOVERY_STAGE2 |
+					~(idc_lck_rcvry_stage_mask));
+			rval = qla83xx_wr_reg(base_vha,
+			    QLA83XX_IDC_LOCK_RECOVERY, data);
+			if (rval)
+				return rval;
+
+			/* Forcefully perform IDC UnLock */
+			rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
+			    &data);
+			if (rval)
+				return rval;
+			/* Clear lock-id by setting 0xff */
+			rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+			    0xff);
+			if (rval)
+				return rval;
+			/* Clear lock-recovery by setting 0x0 */
+			rval = qla83xx_wr_reg(base_vha,
+			    QLA83XX_IDC_LOCK_RECOVERY, 0x0);
+			if (rval)
+				return rval;
+		} else
+			return QLA_SUCCESS;
+	}
+
+	return rval;
+}
+
+static int
+qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t o_drv_lockid, n_drv_lockid;
+	unsigned long lock_recovery_timeout;
+
+	lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
+retry_lockid:
+	rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
+	if (rval)
+		goto exit;
+
+	/* MAX wait time before forcing IDC Lock recovery = 2 secs */
+	if (time_after_eq(jiffies, lock_recovery_timeout)) {
+		if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
+			return QLA_SUCCESS;
+		else
+			return QLA_FUNCTION_FAILED;
+	}
+
+	rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
+	if (rval)
+		goto exit;
+
+	if (o_drv_lockid == n_drv_lockid) {
+		qla83xx_wait_logic();
+		goto retry_lockid;
+	} else
+		return QLA_SUCCESS;
+
+exit:
+	return rval;
+}
+
+void
+qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+#if 0
+	uint16_t options = (requester_id << 15) | BIT_6;
+#endif
+	uint32_t data;
+	uint32_t lock_owner;
+	struct qla_hw_data *ha = base_vha->hw;
+
+	/* IDC-lock implementation using driver-lock/lock-id remote registers */
+retry_lock:
+	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
+	    == QLA_SUCCESS) {
+		if (data) {
+			/* Setting lock-id to our function-number */
+			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+			    ha->portnum);
+		} else {
+			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+			    &lock_owner);
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
+			    "Failed to acquire IDC lock, acquired by %d, "
+			    "retrying...\n", lock_owner);
+
+			/* Retry/Perform IDC-Lock recovery */
+			if (qla83xx_idc_lock_recovery(base_vha)
+			    == QLA_SUCCESS) {
+				qla83xx_wait_logic();
+				goto retry_lock;
+			} else
+				ql_log(ql_log_warn, base_vha, 0xb075,
+				    "IDC Lock recovery FAILED.\n");
+		}
+
+	}
+
+	return;
+
+#if 0
+	/* XXX: IDC-lock implementation using access-control mbx */
+retry_lock2:
+	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+		ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
+		    "Failed to acquire IDC lock. retrying...\n");
+		/* Retry/Perform IDC-Lock recovery */
+		if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
+			qla83xx_wait_logic();
+			goto retry_lock2;
+		} else
+			ql_log(ql_log_warn, base_vha, 0xb076,
+			    "IDC Lock recovery FAILED.\n");
+	}
+
+	return;
+#endif
+}
+
+void
+qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
+{
+#if 0
+	uint16_t options = (requester_id << 15) | BIT_7;
+#endif
+	uint16_t retry;
+	uint32_t data;
+	struct qla_hw_data *ha = base_vha->hw;
+
+	/* IDC-unlock implementation using driver-unlock/lock-id
+	 * remote registers
+	 */
+	retry = 0;
+retry_unlock:
+	if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
+	    == QLA_SUCCESS) {
+		if (data == ha->portnum) {
+			qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
+			/* Clearing lock-id by setting 0xff */
+			qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
+		} else if (retry < 10) {
+			/* SV: XXX: IDC unlock retrying needed here? */
+
+			/* Retry for IDC-unlock */
+			qla83xx_wait_logic();
+			retry++;
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
+			    "Failed to release IDC lock, retrying=%d\n", retry);
+			goto retry_unlock;
+		}
+	} else if (retry < 10) {
+		/* Retry for IDC-unlock */
+		qla83xx_wait_logic();
+		retry++;
+		ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
+		    "Failed to read drv-lockid, retrying=%d\n", retry);
+		goto retry_unlock;
+	}
+
+	return;
+
+#if 0
+	/* XXX: IDC-unlock implementation using access-control mbx */
+	retry = 0;
+retry_unlock2:
+	if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
+		if (retry < 10) {
+			/* Retry for IDC-unlock */
+			qla83xx_wait_logic();
+			retry++;
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
+			    "Failed to release IDC lock, retrying=%d\n", retry);
+			goto retry_unlock2;
+		}
+	}
+
+	return;
+#endif
+}
+
+int
+__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t drv_presence;
+
+	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+	if (rval == QLA_SUCCESS) {
+		drv_presence |= (1 << ha->portnum);
+		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+		    drv_presence);
+	}
+
+	return rval;
+}
+
+int
+qla83xx_set_drv_presence(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+
+	qla83xx_idc_lock(vha, 0);
+	rval = __qla83xx_set_drv_presence(vha);
+	qla83xx_idc_unlock(vha, 0);
+
+	return rval;
+}
+
+int
+__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t drv_presence;
+
+	rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+	if (rval == QLA_SUCCESS) {
+		drv_presence &= ~(1 << ha->portnum);
+		rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+		    drv_presence);
+	}
+
+	return rval;
+}
+
+int
+qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+
+	qla83xx_idc_lock(vha, 0);
+	rval = __qla83xx_clear_drv_presence(vha);
+	qla83xx_idc_unlock(vha, 0);
+
+	return rval;
+}
+
+static void
+qla83xx_need_reset_handler(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t drv_ack, drv_presence;
+	unsigned long ack_timeout;
+
+	/* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
+	ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+	while (1) {
+		qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
+		qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+		if ((drv_ack & drv_presence) == drv_presence)
+			break;
+
+		if (time_after_eq(jiffies, ack_timeout)) {
+			ql_log(ql_log_warn, vha, 0xb067,
+			    "RESET ACK TIMEOUT! drv_presence=0x%x "
+			    "drv_ack=0x%x\n", drv_presence, drv_ack);
+			/*
+			 * The function(s) which did not ack in time are forced
+			 * to withdraw any further participation in the IDC
+			 * reset.
+			 */
+			if (drv_ack != drv_presence)
+				qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
+				    drv_ack);
+			break;
+		}
+
+		qla83xx_idc_unlock(vha, 0);
+		msleep(1000);
+		qla83xx_idc_lock(vha, 0);
+	}
+
+	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
+	ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
+}
+
+static int
+qla83xx_device_bootstrap(scsi_qla_host_t *vha)
+{
+	int rval = QLA_SUCCESS;
+	uint32_t idc_control;
+
+	qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
+	ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
+
+	/* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
+	__qla83xx_get_idc_control(vha, &idc_control);
+	idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
+	__qla83xx_set_idc_control(vha, 0);
+
+	qla83xx_idc_unlock(vha, 0);
+	rval = qla83xx_restart_nic_firmware(vha);
+	qla83xx_idc_lock(vha, 0);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, vha, 0xb06a,
+		    "Failed to restart NIC f/w.\n");
+		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
+		ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
+	} else {
+		ql_dbg(ql_dbg_p3p, vha, 0xb06c,
+		    "Success in restarting nic f/w.\n");
+		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
+		ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
+	}
+
+	return rval;
+}
+
+/* Assumes idc_lock always held on entry */
+int
+qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
+{
+	struct qla_hw_data *ha = base_vha->hw;
+	int rval = QLA_SUCCESS;
+	unsigned long dev_init_timeout;
+	uint32_t dev_state;
+
+	/* Wait for MAX-INIT-TIMEOUT for the device to go ready */
+	dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+	while (1) {
+		if (time_after_eq(jiffies, dev_init_timeout)) {
+			ql_log(ql_log_warn, base_vha, 0xb06e,
+			    "Initialization TIMEOUT!\n");
+			/* Init timeout. Disable further NIC Core
+			 * communication.
+			 */
+			qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
+				QLA8XXX_DEV_FAILED);
+			ql_log(ql_log_info, base_vha, 0xb06f,
+			    "HW State: FAILED.\n");
+		}
+
+		qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
+		switch (dev_state) {
+		case QLA8XXX_DEV_READY:
+			if (ha->flags.nic_core_reset_owner)
+				qla83xx_idc_audit(base_vha,
+				    IDC_AUDIT_COMPLETION);
+			ha->flags.nic_core_reset_owner = 0;
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
+			    "Reset_owner reset by 0x%x.\n",
+			    ha->portnum);
+			goto exit;
+		case QLA8XXX_DEV_COLD:
+			if (ha->flags.nic_core_reset_owner)
+				rval = qla83xx_device_bootstrap(base_vha);
+			else {
+			/* Wait for AEN to change device-state */
+				qla83xx_idc_unlock(base_vha, 0);
+				msleep(1000);
+				qla83xx_idc_lock(base_vha, 0);
+			}
+			break;
+		case QLA8XXX_DEV_INITIALIZING:
+			/* Wait for AEN to change device-state */
+			qla83xx_idc_unlock(base_vha, 0);
+			msleep(1000);
+			qla83xx_idc_lock(base_vha, 0);
+			break;
+		case QLA8XXX_DEV_NEED_RESET:
+			if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
+				qla83xx_need_reset_handler(base_vha);
+			else {
+				/* Wait for AEN to change device-state */
+				qla83xx_idc_unlock(base_vha, 0);
+				msleep(1000);
+				qla83xx_idc_lock(base_vha, 0);
+			}
+			/* reset timeout value after need reset handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_dev_init_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_NEED_QUIESCENT:
+			/* XXX: DEBUG for now */
+			qla83xx_idc_unlock(base_vha, 0);
+			msleep(1000);
+			qla83xx_idc_lock(base_vha, 0);
+			break;
+		case QLA8XXX_DEV_QUIESCENT:
+			/* XXX: DEBUG for now */
+			if (ha->flags.quiesce_owner)
+				goto exit;
+
+			qla83xx_idc_unlock(base_vha, 0);
+			msleep(1000);
+			qla83xx_idc_lock(base_vha, 0);
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_dev_init_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_FAILED:
+			if (ha->flags.nic_core_reset_owner)
+				qla83xx_idc_audit(base_vha,
+				    IDC_AUDIT_COMPLETION);
+			ha->flags.nic_core_reset_owner = 0;
+			__qla83xx_clear_drv_presence(base_vha);
+			qla83xx_idc_unlock(base_vha, 0);
+			qla8xxx_dev_failed_handler(base_vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla83xx_idc_lock(base_vha, 0);
+			goto exit;
+		case QLA8XXX_BAD_VALUE:
+			qla83xx_idc_unlock(base_vha, 0);
+			msleep(1000);
+			qla83xx_idc_lock(base_vha, 0);
+			break;
+		default:
+			ql_log(ql_log_warn, base_vha, 0xb071,
+			    "Unknown Device State: %x.\n", dev_state);
+			qla83xx_idc_unlock(base_vha, 0);
+			qla8xxx_dev_failed_handler(base_vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla83xx_idc_lock(base_vha, 0);
+			goto exit;
+		}
+	}
+
+exit:
+	return rval;
+}
+
+void
+qla2x00_disable_board_on_pci_error(struct work_struct *work)
+{
+	struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
+	    board_disable);
+	struct pci_dev *pdev = ha->pdev;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	/*
+	 * If the UNLOADING flag is already set, bail out: the unload is
+	 * being handled on the path that set the flag first.
+	 */
+	if (test_bit(UNLOADING, &base_vha->dpc_flags))
+		return;
+
+	ql_log(ql_log_warn, base_vha, 0x015b,
+	    "Disabling adapter.\n");
+
+	if (!atomic_read(&pdev->enable_cnt)) {
+		ql_log(ql_log_info, base_vha, 0xfffc,
+		    "PCI device disabled, no action req for PCI error=%lx\n",
+		    base_vha->pci_flags);
+		return;
+	}
+
+	qla2x00_wait_for_sess_deletion(base_vha);
+
+	set_bit(UNLOADING, &base_vha->dpc_flags);
+
+	qla2x00_delete_all_vps(ha, base_vha);
+
+	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+	qla2x00_dfs_remove(base_vha);
+
+	qla84xx_put_chip(base_vha);
+
+	if (base_vha->timer_active)
+		qla2x00_stop_timer(base_vha);
+
+	base_vha->flags.online = 0;
+
+	qla2x00_destroy_deferred_work(ha);
+
+	/*
+	 * Do not try to stop beacon blink as it will issue a mailbox
+	 * command.
+	 */
+	qla2x00_free_sysfs_attr(base_vha, false);
+
+	fc_remove_host(base_vha->host);
+
+	scsi_remove_host(base_vha->host);
+
+	base_vha->flags.init_done = 0;
+	qla25xx_delete_queues(base_vha);
+	qla2x00_free_fcports(base_vha);
+	qla2x00_free_irqs(base_vha);
+	qla2x00_mem_free(ha);
+	qla82xx_md_free(base_vha);
+	qla2x00_free_queues(ha);
+
+	qla2x00_unmap_iobases(ha);
+
+	pci_release_selected_regions(ha->pdev, ha->bars);
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+
+	/*
+	 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
+	 */
+}
+
+/**************************************************************************
+* qla2x00_do_dpc
+*   This kernel thread is a task that is scheduled by the interrupt handler
+*   to perform the background processing for interrupts.
+*
+* Notes:
+* This task always runs in the context of a kernel thread.  It
+* is kicked off by the driver's detect code, and one thread is started
+* per adapter. It immediately goes to sleep and waits for
+* some fibre event.  When either the interrupt handler or
+* the timer routine detects an event, it will set one of the task
+* bits and then wake us up.
+**************************************************************************/
+static int
+qla2x00_do_dpc(void *data)
+{
+	scsi_qla_host_t *base_vha;
+	struct qla_hw_data *ha;
+	uint32_t online;
+	struct qla_qpair *qpair;
+
+	ha = (struct qla_hw_data *)data;
+	base_vha = pci_get_drvdata(ha->pdev);
+
+	set_user_nice(current, MIN_NICE);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
+		    "DPC handler sleeping.\n");
+
+		schedule();
+
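+		/*
+		 * Woken via qla2xxx_wake_dpc(); the dpc_flags bits set by
+		 * the ISR and timer are examined below, one at a time.
+		 */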
+		if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+			goto end_loop;
+
+		if (ha->flags.eeh_busy) {
+			ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
+			    "eeh_busy=%d.\n", ha->flags.eeh_busy);
+			goto end_loop;
+		}
+
+		ha->dpc_active = 1;
+
+		ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+		    "DPC handler waking up, dpc_flags=0x%lx.\n",
+		    base_vha->dpc_flags);
+
+		if (test_bit(UNLOADING, &base_vha->dpc_flags))
+			break;
+
+		qla2x00_do_work(base_vha);
+
+		if (IS_P3P_TYPE(ha)) {
+			if (IS_QLA8044(ha)) {
+				if (test_and_clear_bit(ISP_UNRECOVERABLE,
+					&base_vha->dpc_flags)) {
+					qla8044_idc_lock(ha);
+					qla8044_wr_direct(base_vha,
+						QLA8044_CRB_DEV_STATE_INDEX,
+						QLA8XXX_DEV_FAILED);
+					qla8044_idc_unlock(ha);
+					ql_log(ql_log_info, base_vha, 0x4004,
+						"HW State: FAILED.\n");
+					qla8044_device_state_handler(base_vha);
+					continue;
+				}
+
+			} else {
+				if (test_and_clear_bit(ISP_UNRECOVERABLE,
+					&base_vha->dpc_flags)) {
+					qla82xx_idc_lock(ha);
+					qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+						QLA8XXX_DEV_FAILED);
+					qla82xx_idc_unlock(ha);
+					ql_log(ql_log_info, base_vha, 0x0151,
+						"HW State: FAILED.\n");
+					qla82xx_device_state_handler(base_vha);
+					continue;
+				}
+			}
+
+			if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
+				&base_vha->dpc_flags)) {
+
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
+				    "FCoE context reset scheduled.\n");
+				if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+					&base_vha->dpc_flags))) {
+					if (qla82xx_fcoe_ctx_reset(base_vha)) {
+						/* FCoE-ctx reset failed.
+						 * Escalate to chip-reset
+						 */
+						set_bit(ISP_ABORT_NEEDED,
+							&base_vha->dpc_flags);
+					}
+					clear_bit(ABORT_ISP_ACTIVE,
+						&base_vha->dpc_flags);
+				}
+
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
+				    "FCoE context reset end.\n");
+			}
+		} else if (IS_QLAFX00(ha)) {
+			if (test_and_clear_bit(ISP_UNRECOVERABLE,
+				&base_vha->dpc_flags)) {
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
+				    "Firmware Reset Recovery\n");
+				if (qlafx00_reset_initialize(base_vha)) {
+					/* Failed. Abort isp later. */
+					if (!test_bit(UNLOADING,
+					    &base_vha->dpc_flags)) {
+						set_bit(ISP_UNRECOVERABLE,
+						    &base_vha->dpc_flags);
+						ql_dbg(ql_dbg_dpc, base_vha,
+						    0x4021,
+						    "Reset Recovery Failed\n");
+					}
+				}
+			}
+
+			if (test_and_clear_bit(FX00_TARGET_SCAN,
+				&base_vha->dpc_flags)) {
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
+				    "ISPFx00 Target Scan scheduled\n");
+				if (qlafx00_rescan_isp(base_vha)) {
+					if (!test_bit(UNLOADING,
+					    &base_vha->dpc_flags))
+						set_bit(ISP_UNRECOVERABLE,
+						    &base_vha->dpc_flags);
+					ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
+					    "ISPFx00 Target Scan Failed\n");
+				}
+				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
+				    "ISPFx00 Target Scan End\n");
+			}
+			if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
+				&base_vha->dpc_flags)) {
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
+				    "ISPFx00 Host Info resend scheduled\n");
+				qlafx00_fx_disc(base_vha,
+				    &base_vha->hw->mr.fcport,
+				    FXDISC_REG_HOST_INFO);
+			}
+		}
+
+		if (test_and_clear_bit(DETECT_SFP_CHANGE,
+			&base_vha->dpc_flags) &&
+		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
+			qla24xx_detect_sfp(base_vha);
+
+			if (ha->flags.detected_lr_sfp !=
+			    ha->flags.using_lr_setting)
+				set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		}
+
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
+			bool do_reset = true;
+
+			switch (ql2x_ini_mode) {
+			case QLA2XXX_INI_MODE_ENABLED:
+				break;
+			case QLA2XXX_INI_MODE_DISABLED:
+				if (!qla_tgt_mode_enabled(base_vha))
+					do_reset = false;
+				break;
+			case QLA2XXX_INI_MODE_DUAL:
+				if (!qla_dual_mode_enabled(base_vha))
+					do_reset = false;
+				break;
+			default:
+				break;
+			}
+
+			if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
+			    &base_vha->dpc_flags))) {
+				base_vha->flags.online = 1;
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+				    "ISP abort scheduled.\n");
+				if (ha->isp_ops->abort_isp(base_vha)) {
+					/* failed. retry later */
+					set_bit(ISP_ABORT_NEEDED,
+					    &base_vha->dpc_flags);
+				}
+				clear_bit(ABORT_ISP_ACTIVE,
+						&base_vha->dpc_flags);
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+				    "ISP abort end.\n");
+			}
+		}
+
+		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
+		    &base_vha->dpc_flags)) {
+			qla2x00_update_fcports(base_vha);
+		}
+
+		if (IS_QLAFX00(ha))
+			goto loop_resync_check;
+
+		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
+			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
+			    "Quiescence mode scheduled.\n");
+			if (IS_P3P_TYPE(ha)) {
+				if (IS_QLA82XX(ha))
+					qla82xx_device_state_handler(base_vha);
+				if (IS_QLA8044(ha))
+					qla8044_device_state_handler(base_vha);
+				clear_bit(ISP_QUIESCE_NEEDED,
+				    &base_vha->dpc_flags);
+				if (!ha->flags.quiesce_owner) {
+					qla2x00_perform_loop_resync(base_vha);
+					if (IS_QLA82XX(ha)) {
+						qla82xx_idc_lock(ha);
+						qla82xx_clear_qsnt_ready(
+						    base_vha);
+						qla82xx_idc_unlock(ha);
+					} else if (IS_QLA8044(ha)) {
+						qla8044_idc_lock(ha);
+						qla8044_clear_qsnt_ready(
+						    base_vha);
+						qla8044_idc_unlock(ha);
+					}
+				}
+			} else {
+				clear_bit(ISP_QUIESCE_NEEDED,
+				    &base_vha->dpc_flags);
+				qla2x00_quiesce_io(base_vha);
+			}
+			ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
+			    "Quiescence mode end.\n");
+		}
+
+		if (test_and_clear_bit(RESET_MARKER_NEEDED,
+				&base_vha->dpc_flags) &&
+		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
+
+			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
+			    "Reset marker scheduled.\n");
+			qla2x00_rst_aen(base_vha);
+			clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
+			ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
+			    "Reset marker end.\n");
+		}
+
+		/* Retry each device up to login retry count */
+		if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
+		    atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
+
+			if (!base_vha->relogin_jif ||
+			    time_after_eq(jiffies, base_vha->relogin_jif)) {
+				base_vha->relogin_jif = jiffies + HZ;
+				clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
+
+				ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
+				    "Relogin scheduled.\n");
+				qla2x00_relogin(base_vha);
+				ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
+				    "Relogin end.\n");
+			}
+		}
+loop_resync_check:
+		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+		    &base_vha->dpc_flags)) {
+
+			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
+			    "Loop resync scheduled.\n");
+
+			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
+			    &base_vha->dpc_flags))) {
+
+				qla2x00_loop_resync(base_vha);
+
+				clear_bit(LOOP_RESYNC_ACTIVE,
+						&base_vha->dpc_flags);
+			}
+
+			ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
+			    "Loop resync end.\n");
+		}
+
+		if (IS_QLAFX00(ha))
+			goto intr_on_check;
+
+		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
+		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
+			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
+			qla2xxx_flash_npiv_conf(base_vha);
+		}
+
+intr_on_check:
+		if (!ha->interrupts_on)
+			ha->isp_ops->enable_intrs(ha);
+
+		if (test_and_clear_bit(BEACON_BLINK_NEEDED,
+					&base_vha->dpc_flags)) {
+			if (ha->beacon_blink_led == 1)
+				ha->isp_ops->beacon_blink(base_vha);
+		}
+
+		/* qpair online check */
+		if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+		    &base_vha->dpc_flags)) {
+			if (ha->flags.eeh_busy ||
+			    ha->flags.pci_channel_io_perm_failure)
+				online = 0;
+			else
+				online = 1;
+
+			mutex_lock(&ha->mq_lock);
+			list_for_each_entry(qpair, &base_vha->qp_list,
+			    qp_list_elem)
+				qpair->online = online;
+			mutex_unlock(&ha->mq_lock);
+		}
+
+		if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+			ql_log(ql_log_info, base_vha, 0xffffff,
+				"nvme: SET ZIO Activity exchange threshold to %d.\n",
+						ha->nvme_last_rptd_aen);
+			if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+				ql_log(ql_log_info, base_vha, 0xffffff,
+					"nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+						ha->nvme_last_rptd_aen);
+			}
+		}
+
+		if (!IS_QLAFX00(ha))
+			qla2x00_do_dpc_all_vps(base_vha);
+
+		ha->dpc_active = 0;
+end_loop:
+		set_current_state(TASK_INTERRUPTIBLE);
+	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
+
+	ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
+	    "DPC handler exiting.\n");
+
+	/*
+	 * Make sure that nobody tries to wake us up again.
+	 */
+	ha->dpc_active = 0;
+
+	/* Cleanup any residual CTX SRBs. */
+	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
+	return 0;
+}
+
+void
+qla2xxx_wake_dpc(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct task_struct *t = ha->dpc_thread;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
+		wake_up_process(t);
+}
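+
+/*
+ * Illustrative sketch (not driver code): the producer side of the DPC
+ * handshake.  Timer or interrupt code requests work by setting a
+ * dpc_flags bit and then waking the thread, e.g.:
+ *
+ *	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ *	qla2xxx_wake_dpc(vha);
+ *
+ * qla2x00_do_dpc() consumes each bit with test_and_clear_bit(), so a
+ * request is serviced at most once per wakeup.
+ */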
+
+/*
+*  qla2x00_rst_aen
+*      Processes asynchronous reset.
+*
+* Input:
+*      vha = adapter block pointer.
+*/
+static void
+qla2x00_rst_aen(scsi_qla_host_t *vha)
+{
+	if (vha->flags.online && !vha->flags.reset_active &&
+	    !atomic_read(&vha->loop_down_timer) &&
+	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
+		do {
+			clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+			/*
+			 * Issue marker command only when we are going to start
+			 * the I/O.
+			 */
+			vha->marker_needed = 1;
+		} while (!atomic_read(&vha->loop_down_timer) &&
+		    (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
+	}
+}
+
+/**************************************************************************
+*   qla2x00_timer
+*
+* Description:
+*   One second timer
+*
+* Context: Interrupt
+***************************************************************************/
+void
+qla2x00_timer(scsi_qla_host_t *vha)
+{
+	unsigned long	cpu_flags = 0;
+	int		start_dpc = 0;
+	int		index;
+	srb_t		*sp;
+	uint16_t        w;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req;
+
+	if (ha->flags.eeh_busy) {
+		ql_dbg(ql_dbg_timer, vha, 0x6000,
+		    "EEH = %d, restarting timer.\n",
+		    ha->flags.eeh_busy);
+		qla2x00_restart_timer(vha, WATCH_INTERVAL);
+		return;
+	}
+
+	/*
+	 * Hardware read to raise pending EEH errors during mailbox waits. If
+	 * the read returns -1 then disable the board.
+	 */
+	if (!pci_channel_offline(ha->pdev)) {
+		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+		qla2x00_check_reg16_for_disconnect(vha, w);
+	}
+
+	/* Make sure qla82xx_watchdog is run only for physical port */
+	if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
+		if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
+			start_dpc++;
+		if (IS_QLA82XX(ha))
+			qla82xx_watchdog(vha);
+		else if (IS_QLA8044(ha))
+			qla8044_watchdog(vha);
+	}
+
+	if (!vha->vp_idx && IS_QLAFX00(ha))
+		qlafx00_timer_routine(vha);
+
+	/* Loop down handler. */
+	if (atomic_read(&vha->loop_down_timer) > 0 &&
+	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+	    !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
+		&& vha->flags.online) {
+
+		if (atomic_read(&vha->loop_down_timer) ==
+		    vha->loop_down_abort_time) {
+
+			ql_log(ql_log_info, vha, 0x6008,
+			    "Loop down - aborting the queues before time expires.\n");
+
+			if (!IS_QLA2100(ha) && vha->link_down_timeout)
+				atomic_set(&vha->loop_state, LOOP_DEAD);
+
+			/*
+			 * Schedule an ISP abort to return any FCP2-device
+			 * commands.
+			 */
+			/* NPIV - scan physical port only */
+			if (!vha->vp_idx) {
+				spin_lock_irqsave(&ha->hardware_lock,
+				    cpu_flags);
+				req = ha->req_q_map[0];
+				for (index = 1;
+				    index < req->num_outstanding_cmds;
+				    index++) {
+					fc_port_t *sfcp;
+
+					sp = req->outstanding_cmds[index];
+					if (!sp)
+						continue;
+					if (sp->cmd_type != TYPE_SRB)
+						continue;
+					if (sp->type != SRB_SCSI_CMD)
+						continue;
+					sfcp = sp->fcport;
+					if (!(sfcp->flags & FCF_FCP2_DEVICE))
+						continue;
+
+					if (IS_QLA82XX(ha))
+						set_bit(FCOE_CTX_RESET_NEEDED,
+							&vha->dpc_flags);
+					else
+						set_bit(ISP_ABORT_NEEDED,
+							&vha->dpc_flags);
+					break;
+				}
+				spin_unlock_irqrestore(&ha->hardware_lock,
+								cpu_flags);
+			}
+			start_dpc++;
+		}
+
+		/* if the loop has been down for 4 minutes, reinit adapter */
+		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
+			if (!(vha->device_flags & DFLG_NO_CABLE)) {
+				ql_log(ql_log_warn, vha, 0x6009,
+				    "Loop down - aborting ISP.\n");
+
+				if (IS_QLA82XX(ha))
+					set_bit(FCOE_CTX_RESET_NEEDED,
+						&vha->dpc_flags);
+				else
+					set_bit(ISP_ABORT_NEEDED,
+						&vha->dpc_flags);
+			}
+		}
+		ql_dbg(ql_dbg_timer, vha, 0x600a,
+		    "Loop down - seconds remaining %d.\n",
+		    atomic_read(&vha->loop_down_timer));
+	}
+	/* Check if beacon LED needs to be blinked for physical host only */
+	if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
+		/* There is no beacon_blink function for ISP82xx */
+		if (!IS_P3P_TYPE(ha)) {
+			set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
+			start_dpc++;
+		}
+	}
+
+	/* Process any deferred work. */
+	if (!list_empty(&vha->work_list))
+		start_dpc++;
+
+	/*
+	 * FC-NVME
+	 * see if the active AEN count has changed from what was last reported.
+	 */
+	if (!vha->vp_idx &&
+		atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
+		ha->zio_mode == QLA_ZIO_MODE_6) {
+		ql_log(ql_log_info, vha, 0x3002,
+			"nvme: Sched: Set ZIO exchange threshold to %d.\n",
+			ha->nvme_last_rptd_aen);
+		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+		start_dpc++;
+	}
+
+	/* Schedule the DPC routine if needed */
+	if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+	    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
+	    start_dpc ||
+	    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
+	    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
+	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
+	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
+	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+		ql_dbg(ql_dbg_timer, vha, 0x600b,
+		    "isp_abort_needed=%d loop_resync_needed=%d "
+		    "fcport_update_needed=%d start_dpc=%d "
+		    "reset_marker_needed=%d",
+		    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
+		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
+		    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
+		    start_dpc,
+		    test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+		ql_dbg(ql_dbg_timer, vha, 0x600c,
+		    "beacon_blink_needed=%d isp_unrecoverable=%d "
+		    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
+		    "relogin_needed=%d.\n",
+		    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
+		    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
+		    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
+		    test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
+		    test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+		qla2xxx_wake_dpc(vha);
+	}
+
+	qla2x00_restart_timer(vha, WATCH_INTERVAL);
+}
+
+/* Firmware interface routines. */
+
+#define FW_BLOBS	11
+#define FW_ISP21XX	0
+#define FW_ISP22XX	1
+#define FW_ISP2300	2
+#define FW_ISP2322	3
+#define FW_ISP24XX	4
+#define FW_ISP25XX	5
+#define FW_ISP81XX	6
+#define FW_ISP82XX	7
+#define FW_ISP2031	8
+#define FW_ISP8031	9
+#define FW_ISP27XX	10
+
+#define FW_FILE_ISP21XX	"ql2100_fw.bin"
+#define FW_FILE_ISP22XX	"ql2200_fw.bin"
+#define FW_FILE_ISP2300	"ql2300_fw.bin"
+#define FW_FILE_ISP2322	"ql2322_fw.bin"
+#define FW_FILE_ISP24XX	"ql2400_fw.bin"
+#define FW_FILE_ISP25XX	"ql2500_fw.bin"
+#define FW_FILE_ISP81XX	"ql8100_fw.bin"
+#define FW_FILE_ISP82XX	"ql8200_fw.bin"
+#define FW_FILE_ISP2031	"ql2600_fw.bin"
+#define FW_FILE_ISP8031	"ql8300_fw.bin"
+#define FW_FILE_ISP27XX	"ql2700_fw.bin"
+
+
+static DEFINE_MUTEX(qla_fw_lock);
+
+static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
+	{ .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
+	{ .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
+	{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
+	{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
+	{ .name = FW_FILE_ISP24XX, },
+	{ .name = FW_FILE_ISP25XX, },
+	{ .name = FW_FILE_ISP81XX, },
+	{ .name = FW_FILE_ISP82XX, },
+	{ .name = FW_FILE_ISP2031, },
+	{ .name = FW_FILE_ISP8031, },
+	{ .name = FW_FILE_ISP27XX, },
+};
+
+struct fw_blob *
+qla2x00_request_firmware(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct fw_blob *blob;
+
+	if (IS_QLA2100(ha)) {
+		blob = &qla_fw_blobs[FW_ISP21XX];
+	} else if (IS_QLA2200(ha)) {
+		blob = &qla_fw_blobs[FW_ISP22XX];
+	} else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
+		blob = &qla_fw_blobs[FW_ISP2300];
+	} else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+		blob = &qla_fw_blobs[FW_ISP2322];
+	} else if (IS_QLA24XX_TYPE(ha)) {
+		blob = &qla_fw_blobs[FW_ISP24XX];
+	} else if (IS_QLA25XX(ha)) {
+		blob = &qla_fw_blobs[FW_ISP25XX];
+	} else if (IS_QLA81XX(ha)) {
+		blob = &qla_fw_blobs[FW_ISP81XX];
+	} else if (IS_QLA82XX(ha)) {
+		blob = &qla_fw_blobs[FW_ISP82XX];
+	} else if (IS_QLA2031(ha)) {
+		blob = &qla_fw_blobs[FW_ISP2031];
+	} else if (IS_QLA8031(ha)) {
+		blob = &qla_fw_blobs[FW_ISP8031];
+	} else if (IS_QLA27XX(ha)) {
+		blob = &qla_fw_blobs[FW_ISP27XX];
+	} else {
+		return NULL;
+	}
+
+	mutex_lock(&qla_fw_lock);
+	if (blob->fw)
+		goto out;
+
+	if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
+		ql_log(ql_log_warn, vha, 0x0063,
+		    "Failed to load firmware image (%s).\n", blob->name);
+		blob->fw = NULL;
+		blob = NULL;
+		goto out;
+	}
+
+out:
+	mutex_unlock(&qla_fw_lock);
+	return blob;
+}
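+
+/*
+ * Illustrative usage sketch (hypothetical caller): the blob is cached
+ * in qla_fw_blobs[] under qla_fw_lock, so repeated calls return the
+ * same struct firmware until qla2x00_release_firmware() runs at module
+ * exit:
+ *
+ *	struct fw_blob *blob = qla2x00_request_firmware(vha);
+ *	if (blob && blob->fw) {
+ *		const uint8_t *code = blob->fw->data;	// image bytes
+ *		size_t len = blob->fw->size;		// image length
+ *	}
+ */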
+
+static void
+qla2x00_release_firmware(void)
+{
+	int idx;
+
+	mutex_lock(&qla_fw_lock);
+	for (idx = 0; idx < FW_BLOBS; idx++)
+		release_firmware(qla_fw_blobs[idx].fw);
+	mutex_unlock(&qla_fw_lock);
+}
+
+static pci_ers_result_t
+qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_aer, vha, 0x9000,
+	    "PCI error detected, state %x.\n", state);
+
+	if (!atomic_read(&pdev->enable_cnt)) {
+		ql_log(ql_log_info, vha, 0xffff,
+			"PCI device is disabled,state %x\n", state);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+
+	switch (state) {
+	case pci_channel_io_normal:
+		ha->flags.eeh_busy = 0;
+		if (ql2xmqsupport) {
+			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		ha->flags.eeh_busy = 1;
+		/* For ISP82XX complete any pending mailbox cmd */
+		if (IS_QLA82XX(ha)) {
+			ha->flags.isp82xx_fw_hung = 1;
+			ql_dbg(ql_dbg_aer, vha, 0x9001, "PCI channel I/O frozen\n");
+			qla82xx_clear_pending_mbx(vha);
+		}
+		qla2x00_free_irqs(vha);
+		pci_disable_device(pdev);
+		/* Return back all IOs */
+		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		if (ql2xmqsupport) {
+			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		ha->flags.pci_channel_io_perm_failure = 1;
+		qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+		if (ql2xmqsupport) {
+			set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		}
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t
+qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	int risc_paused = 0;
+	uint32_t stat;
+	unsigned long flags;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+
+	if (IS_QLA82XX(ha))
+		return PCI_ERS_RESULT_RECOVERED;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+		stat = RD_REG_DWORD(&reg->hccr);
+		if (stat & HCCR_RISC_PAUSE)
+			risc_paused = 1;
+	} else if (IS_QLA23XX(ha)) {
+		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+		if (stat & HSR_RISC_PAUSED)
+			risc_paused = 1;
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		stat = RD_REG_DWORD(&reg24->host_status);
+		if (stat & HSRX_RISC_PAUSED)
+			risc_paused = 1;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (risc_paused) {
+		ql_log(ql_log_info, base_vha, 0x9003,
+		    "RISC paused -- mmio_enabled, Dumping firmware.\n");
+		ha->isp_ops->fw_dump(base_vha, 0);
+
+		return PCI_ERS_RESULT_NEED_RESET;
+	} else
+		return PCI_ERS_RESULT_RECOVERED;
+}
+
+static uint32_t
+qla82xx_error_recovery(scsi_qla_host_t *base_vha)
+{
+	uint32_t rval = QLA_FUNCTION_FAILED;
+	uint32_t drv_active = 0;
+	struct qla_hw_data *ha = base_vha->hw;
+	int fn;
+	struct pci_dev *other_pdev = NULL;
+
+	ql_dbg(ql_dbg_aer, base_vha, 0x9006,
+	    "Entered %s.\n", __func__);
+
+	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+	if (base_vha->flags.online) {
+		/* Abort all outstanding commands so that they can be
+		 * requeued later. */
+		qla2x00_abort_isp_cleanup(base_vha);
+	}
+
+	fn = PCI_FUNC(ha->pdev->devfn);
+	while (fn > 0) {
+		fn--;
+		ql_dbg(ql_dbg_aer, base_vha, 0x9007,
+		    "Finding pci device at function = 0x%x.\n", fn);
+		other_pdev =
+		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
+		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
+		    fn));
+
+		if (!other_pdev)
+			continue;
+		if (atomic_read(&other_pdev->enable_cnt)) {
+			ql_dbg(ql_dbg_aer, base_vha, 0x9008,
+			    "Found PCI func available and enable at 0x%x.\n",
+			    fn);
+			pci_dev_put(other_pdev);
+			break;
+		}
+		pci_dev_put(other_pdev);
+	}
+
+	if (!fn) {
+		/* Reset owner */
+		ql_dbg(ql_dbg_aer, base_vha, 0x9009,
+		    "This devfn is reset owner = 0x%x.\n",
+		    ha->pdev->devfn);
+		qla82xx_idc_lock(ha);
+
+		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+		    QLA8XXX_DEV_INITIALIZING);
+
+		qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
+		    QLA82XX_IDC_VERSION);
+
+		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+		ql_dbg(ql_dbg_aer, base_vha, 0x900a,
+		    "drv_active = 0x%x.\n", drv_active);
+
+		qla82xx_idc_unlock(ha);
+		/* Reset if the device has not already been reset;
+		 * drv_active would be 0 if a reset has already been done.
+		 */
+		if (drv_active)
+			rval = qla82xx_start_firmware(base_vha);
+		else
+			rval = QLA_SUCCESS;
+		qla82xx_idc_lock(ha);
+
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_info, base_vha, 0x900b,
+			    "HW State: FAILED.\n");
+			qla82xx_clear_drv_active(ha);
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+			    QLA8XXX_DEV_FAILED);
+		} else {
+			ql_log(ql_log_info, base_vha, 0x900c,
+			    "HW State: READY.\n");
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+			    QLA8XXX_DEV_READY);
+			qla82xx_idc_unlock(ha);
+			ha->flags.isp82xx_fw_hung = 0;
+			rval = qla82xx_restart_isp(base_vha);
+			qla82xx_idc_lock(ha);
+			/* Clear driver state register */
+			qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
+			qla82xx_set_drv_active(base_vha);
+		}
+		qla82xx_idc_unlock(ha);
+	} else {
+		ql_dbg(ql_dbg_aer, base_vha, 0x900d,
+		    "This devfn is not reset owner = 0x%x.\n",
+		    ha->pdev->devfn);
+		if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
+		    QLA8XXX_DEV_READY)) {
+			ha->flags.isp82xx_fw_hung = 0;
+			rval = qla82xx_restart_isp(base_vha);
+			qla82xx_idc_lock(ha);
+			qla82xx_set_drv_active(base_vha);
+			qla82xx_idc_unlock(ha);
+		}
+	}
+	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+	return rval;
+}
+
+static pci_ers_result_t
+qla2xxx_pci_slot_reset(struct pci_dev *pdev)
+{
+	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	struct rsp_que *rsp;
+	int rc, retries = 10;
+
+	ql_dbg(ql_dbg_aer, base_vha, 0x9004,
+	    "Slot Reset.\n");
+
+	/* Workaround: the qla2xxx driver, which accesses hardware early in
+	 * recovery, needs the error state to be pci_channel_io_normal.
+	 * Otherwise mailbox commands time out.
+	 */
+	pdev->error_state = pci_channel_io_normal;
+
+	pci_restore_state(pdev);
+
+	/* pci_restore_state() clears the saved_state flag of the device,
+	 * so save the state again to set the saved_state flag.
+	 */
+	pci_save_state(pdev);
+
+	if (ha->mem_only)
+		rc = pci_enable_device_mem(pdev);
+	else
+		rc = pci_enable_device(pdev);
+
+	if (rc) {
+		ql_log(ql_log_warn, base_vha, 0x9005,
+		    "Can't re-enable PCI device after reset.\n");
+		goto exit_slot_reset;
+	}
+
+	rsp = ha->rsp_q_map[0];
+	if (qla2x00_request_irqs(ha, rsp))
+		goto exit_slot_reset;
+
+	if (ha->isp_ops->pci_config(base_vha))
+		goto exit_slot_reset;
+
+	if (IS_QLA82XX(ha)) {
+		if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
+			ret = PCI_ERS_RESULT_RECOVERED;
+			goto exit_slot_reset;
+		} else
+			goto exit_slot_reset;
+	}
+
+	while (ha->flags.mbox_busy && retries--)
+		msleep(1000);
+
+	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+	if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
+		ret = PCI_ERS_RESULT_RECOVERED;
+	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+exit_slot_reset:
+	ql_dbg(ql_dbg_aer, base_vha, 0x900e,
+	    "slot_reset return %x.\n", ret);
+
+	return ret;
+}
+
+static void
+qla2xxx_pci_resume(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	int ret;
+
+	ql_dbg(ql_dbg_aer, base_vha, 0x900f,
+	    "pci_resume.\n");
+
+	ret = qla2x00_wait_for_hba_online(base_vha);
+	if (ret != QLA_SUCCESS) {
+		ql_log(ql_log_fatal, base_vha, 0x9002,
+		    "The device failed to resume I/O from slot/link_reset.\n");
+	}
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	ha->flags.eeh_busy = 0;
+}
+
+static void
+qla83xx_disable_laser(scsi_qla_host_t *vha)
+{
+	uint32_t reg, data, fn;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
+
+	/* pci func #/port # */
+	ql_dbg(ql_dbg_init, vha, 0x004b,
+	    "Disabling Laser for hba: %p\n", vha);
+
+	fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
+		(BIT_15|BIT_14|BIT_13|BIT_12));
+
+	fn = (fn >> 12);
+
+	if (fn & 1)
+		reg = PORT_1_2031;
+	else
+		reg = PORT_0_2031;
+
+	data = LASER_OFF_2031;
+
+	qla83xx_wr_reg(vha, reg, data);
+}
+
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+	int rc;
+	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+	if (USER_CTRL_IRQ(vha->hw))
+		rc = blk_mq_map_queues(&shost->tag_set);
+	else
+		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+	return rc;
+}
+
+static const struct pci_error_handlers qla2xxx_err_handler = {
+	.error_detected = qla2xxx_pci_error_detected,
+	.mmio_enabled = qla2xxx_pci_mmio_enabled,
+	.slot_reset = qla2xxx_pci_slot_reset,
+	.resume = qla2xxx_pci_resume,
+};
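+
+/*
+ * Note (per Documentation/PCI/pci-error-recovery.txt): the PCI core
+ * invokes these callbacks in order -- error_detected() first, then
+ * mmio_enabled() when CAN_RECOVER was returned, slot_reset() after a
+ * link or slot reset when NEED_RESET was requested, and finally
+ * resume() once normal I/O may restart.
+ */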
+
+static struct pci_device_id qla2xxx_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
+	{ 0 },
+};
+MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
+
+static struct pci_driver qla2xxx_pci_driver = {
+	.name		= QLA2XXX_DRIVER_NAME,
+	.driver		= {
+		.owner		= THIS_MODULE,
+	},
+	.id_table	= qla2xxx_pci_tbl,
+	.probe		= qla2x00_probe_one,
+	.remove		= qla2x00_remove_one,
+	.shutdown	= qla2x00_shutdown,
+	.err_handler	= &qla2xxx_err_handler,
+};
+
+static const struct file_operations apidev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = noop_llseek,
+};
+
+/**
+ * qla2x00_module_init - Module initialization.
+ **/
+static int __init
+qla2x00_module_init(void)
+{
+	int ret = 0;
+
+	/* Allocate cache for SRBs. */
+	srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
+	    SLAB_HWCACHE_ALIGN, NULL);
+	if (srb_cachep == NULL) {
+		ql_log(ql_log_fatal, NULL, 0x0001,
+		    "Unable to allocate SRB cache...Failing load!.\n");
+		return -ENOMEM;
+	}
+
+	/* Initialize target kmem_cache and mem_pools */
+	ret = qlt_init();
+	if (ret < 0) {
+		goto destroy_cache;
+	} else if (ret > 0) {
+		/*
+		 * If initiator mode is explicitly disabled by qlt_init(),
+		 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+		 * performing scsi_scan_target() during LOOP UP event.
+		 */
+		qla2xxx_transport_functions.disable_target_scan = 1;
+		qla2xxx_transport_vport_functions.disable_target_scan = 1;
+	}
+
+	/* Derive version string. */
+	strcpy(qla2x00_version_str, QLA2XXX_VERSION);
+	if (ql2xextended_error_logging)
+		strcat(qla2x00_version_str, "-debug");
+	if (ql2xextended_error_logging == 1)
+		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+
+	qla2xxx_transport_template =
+	    fc_attach_transport(&qla2xxx_transport_functions);
+	if (!qla2xxx_transport_template) {
+		ql_log(ql_log_fatal, NULL, 0x0002,
+		    "fc_attach_transport failed...Failing load!.\n");
+		ret = -ENODEV;
+		goto qlt_exit;
+	}
+
+	apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
+	if (apidev_major < 0) {
+		ql_log(ql_log_fatal, NULL, 0x0003,
+		    "Unable to register char device %s.\n", QLA2XXX_APIDEV);
+	}
+
+	qla2xxx_transport_vport_template =
+	    fc_attach_transport(&qla2xxx_transport_vport_functions);
+	if (!qla2xxx_transport_vport_template) {
+		ql_log(ql_log_fatal, NULL, 0x0004,
+		    "fc_attach_transport vport failed...Failing load!.\n");
+		ret = -ENODEV;
+		goto unreg_chrdev;
+	}
+	ql_log(ql_log_info, NULL, 0x0005,
+	    "QLogic Fibre Channel HBA Driver: %s.\n",
+	    qla2x00_version_str);
+	ret = pci_register_driver(&qla2xxx_pci_driver);
+	if (ret) {
+		ql_log(ql_log_fatal, NULL, 0x0006,
+		    "pci_register_driver failed...ret=%d Failing load!.\n",
+		    ret);
+		goto release_vport_transport;
+	}
+	return ret;
+
+release_vport_transport:
+	fc_release_transport(qla2xxx_transport_vport_template);
+
+unreg_chrdev:
+	if (apidev_major >= 0)
+		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+	fc_release_transport(qla2xxx_transport_template);
+
+qlt_exit:
+	qlt_exit();
+
+destroy_cache:
+	kmem_cache_destroy(srb_cachep);
+	return ret;
+}
+
+/**
+ * qla2x00_module_exit - Module cleanup.
+ **/
+static void __exit
+qla2x00_module_exit(void)
+{
+	unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+	pci_unregister_driver(&qla2xxx_pci_driver);
+	qla2x00_release_firmware();
+	kmem_cache_destroy(srb_cachep);
+	qlt_exit();
+	if (ctx_cachep)
+		kmem_cache_destroy(ctx_cachep);
+	fc_release_transport(qla2xxx_transport_template);
+	fc_release_transport(qla2xxx_transport_vport_template);
+}
+
+module_init(qla2x00_module_init);
+module_exit(qla2x00_module_exit);
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLA2XXX_VERSION);
+MODULE_FIRMWARE(FW_FILE_ISP21XX);
+MODULE_FIRMWARE(FW_FILE_ISP22XX);
+MODULE_FIRMWARE(FW_FILE_ISP2300);
+MODULE_FIRMWARE(FW_FILE_ISP2322);
+MODULE_FIRMWARE(FW_FILE_ISP24XX);
+MODULE_FIRMWARE(FW_FILE_ISP25XX);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_settings.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_settings.h
new file mode 100644
index 0000000..2fb7ebf
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_settings.h
@@ -0,0 +1,12 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#define MAX_RETRIES_OF_ISP_ABORT	5
+
+/* Max time to wait for the loop to be in LOOP_READY state */
+#define MAX_LOOP_TIMEOUT	(60 * 5)
+
+#include "qla_version.h"
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_sup.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_sup.c
new file mode 100644
index 0000000..b4336e0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_sup.c
@@ -0,0 +1,3259 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+/*
+ * NVRAM support routines
+ */
+
+/**
+ * qla2x00_lock_nvram_access() - Acquire the NVRAM access semaphore.
+ * @ha: HA context
+ */
+static void
+qla2x00_lock_nvram_access(struct qla_hw_data *ha)
+{
+	uint16_t data;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
+		data = RD_REG_WORD(&reg->nvram);
+		while (data & NVR_BUSY) {
+			udelay(100);
+			data = RD_REG_WORD(&reg->nvram);
+		}
+
+		/* Lock resource */
+		WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
+		RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+		udelay(5);
+		data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+		while ((data & BIT_0) == 0) {
+			/* Lock failed */
+			udelay(100);
+			WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
+			RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+			udelay(5);
+			data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+		}
+	}
+}
+
+/**
+ * qla2x00_unlock_nvram_access() - Release the NVRAM access semaphore.
+ * @ha: HA context
+ */
+static void
+qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
+		WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
+		RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+	}
+}
+
+/**
+ * qla2x00_nv_write() - Clock a bit out on the NVRAM serial interface.
+ * @ha: HA context
+ * @data: Serial interface selector
+ */
+static void
+qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+	    NVR_WRT_ENABLE);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+	WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+}
+
+/**
+ * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
+ *	NVRAM.
+ * @ha: HA context
+ * @nv_cmd: NVRAM command
+ *
+ * Bit definitions for NVRAM command:
+ *
+ *	Bit 26     = start bit
+ *	Bit 25, 24 = opcode
+ *	Bit 23-16  = address
+ *	Bit 15-0   = write data
+ *
+ * Returns the word read back from NVRAM.
+ */
+static uint16_t
+qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
+{
+	uint8_t		cnt;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint16_t	data = 0;
+	uint16_t	reg_data;
+
+	/* Send command to NVRAM. */
+	nv_cmd <<= 5;
+	for (cnt = 0; cnt < 11; cnt++) {
+		if (nv_cmd & BIT_31)
+			qla2x00_nv_write(ha, NVR_DATA_OUT);
+		else
+			qla2x00_nv_write(ha, 0);
+		nv_cmd <<= 1;
+	}
+
+	/* Read data from NVRAM. */
+	for (cnt = 0; cnt < 16; cnt++) {
+		WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
+		NVRAM_DELAY();
+		data <<= 1;
+		reg_data = RD_REG_WORD(&reg->nvram);
+		if (reg_data & NVR_DATA_IN)
+			data |= BIT_0;
+		WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
+		NVRAM_DELAY();
+	}
+
+	/* Deselect chip. */
+	WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+
+	return data;
+}
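+
+/*
+ * Sketch (not driver code) of how the serial command above is composed
+ * for a read of NVRAM word 0x10; NV_READ_OP supplies the start bit and
+ * opcode in bits 26-24:
+ *
+ *	uint32_t nv_cmd = (0x10 << 16) | NV_READ_OP;
+ *	nv_cmd <<= 5;	// left-justify: bit 26 moves up to BIT_31
+ *	// 11 clocks shift out the start bit, opcode and 8-bit address;
+ *	// 16 further clocks shift the addressed word back in.
+ */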
+
+
+/**
+ * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
+ *	request routine to get the word from NVRAM.
+ * @ha: HA context
+ * @addr: Address in NVRAM to read
+ *
+ * Returns the word read from nvram @addr.
+ */
+static uint16_t
+qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
+{
+	uint16_t	data;
+	uint32_t	nv_cmd;
+
+	nv_cmd = addr << 16;
+	nv_cmd |= NV_READ_OP;
+	data = qla2x00_nvram_request(ha, nv_cmd);
+
+	return (data);
+}
+
+/**
+ * qla2x00_nv_deselect() - Deselect NVRAM operations.
+ * @ha: HA context
+ */
+static void
+qla2x00_nv_deselect(struct qla_hw_data *ha)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	NVRAM_DELAY();
+}
+
+/**
+ * qla2x00_write_nvram_word() - Write NVRAM data.
+ * @ha: HA context
+ * @addr: Address in NVRAM to write
+ * @data: word to program
+ */
+static void
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
+{
+	int count;
+	uint16_t word;
+	uint32_t nv_cmd, wait_cnt;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	qla2x00_nv_write(ha, NVR_DATA_OUT);
+	qla2x00_nv_write(ha, 0);
+	qla2x00_nv_write(ha, 0);
+
+	for (word = 0; word < 8; word++)
+		qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Write data */
+	nv_cmd = (addr << 16) | NV_WRITE_OP;
+	nv_cmd |= data;
+	nv_cmd <<= 5;
+	for (count = 0; count < 27; count++) {
+		if (nv_cmd & BIT_31)
+			qla2x00_nv_write(ha, NVR_DATA_OUT);
+		else
+			qla2x00_nv_write(ha, 0);
+
+		nv_cmd <<= 1;
+	}
+
+	qla2x00_nv_deselect(ha);
+
+	/* Wait for NVRAM to become ready */
+	WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	wait_cnt = NVR_WAIT_CNT;
+	do {
+		if (!--wait_cnt) {
+			ql_dbg(ql_dbg_user, vha, 0x708d,
+			    "NVRAM didn't go ready...\n");
+			break;
+		}
+		NVRAM_DELAY();
+		word = RD_REG_WORD(&reg->nvram);
+	} while ((word & NVR_DATA_IN) == 0);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Disable writes */
+	qla2x00_nv_write(ha, NVR_DATA_OUT);
+	for (count = 0; count < 10; count++)
+		qla2x00_nv_write(ha, 0);
+
+	qla2x00_nv_deselect(ha);
+}
+
+static int
+qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
+	uint16_t data, uint32_t tmo)
+{
+	int ret, count;
+	uint16_t word;
+	uint32_t nv_cmd;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	ret = QLA_SUCCESS;
+
+	qla2x00_nv_write(ha, NVR_DATA_OUT);
+	qla2x00_nv_write(ha, 0);
+	qla2x00_nv_write(ha, 0);
+
+	for (word = 0; word < 8; word++)
+		qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Write data */
+	nv_cmd = (addr << 16) | NV_WRITE_OP;
+	nv_cmd |= data;
+	nv_cmd <<= 5;
+	for (count = 0; count < 27; count++) {
+		if (nv_cmd & BIT_31)
+			qla2x00_nv_write(ha, NVR_DATA_OUT);
+		else
+			qla2x00_nv_write(ha, 0);
+
+		nv_cmd <<= 1;
+	}
+
+	qla2x00_nv_deselect(ha);
+
+	/* Wait for NVRAM to become ready */
+	WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	do {
+		NVRAM_DELAY();
+		word = RD_REG_WORD(&reg->nvram);
+		if (!--tmo) {
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+	} while ((word & NVR_DATA_IN) == 0);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Disable writes */
+	qla2x00_nv_write(ha, NVR_DATA_OUT);
+	for (count = 0; count < 10; count++)
+		qla2x00_nv_write(ha, 0);
+
+	qla2x00_nv_deselect(ha);
+
+	return ret;
+}
+
+/**
+ * qla2x00_clear_nvram_protection() - Clear NVRAM write protection.
+ * @ha: HA context
+ */
+static int
+qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
+{
+	int ret, stat;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint32_t word, wait_cnt;
+	uint16_t wprot, wprot_old;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	/* Clear NVRAM write protection. */
+	ret = QLA_FUNCTION_FAILED;
+
+	wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
+	stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
+					    cpu_to_le16(0x1234), 100000);
+	wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
+	if (stat != QLA_SUCCESS || wprot != 0x1234) {
+		/* Write enable. */
+		qla2x00_nv_write(ha, NVR_DATA_OUT);
+		qla2x00_nv_write(ha, 0);
+		qla2x00_nv_write(ha, 0);
+		for (word = 0; word < 8; word++)
+			qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+		qla2x00_nv_deselect(ha);
+
+		/* Enable protection register. */
+		qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+		qla2x00_nv_write(ha, NVR_PR_ENABLE);
+		qla2x00_nv_write(ha, NVR_PR_ENABLE);
+		for (word = 0; word < 8; word++)
+			qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE);
+
+		qla2x00_nv_deselect(ha);
+
+		/* Clear protection register (ffff is cleared). */
+		qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+		qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+		qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+		for (word = 0; word < 8; word++)
+			qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE);
+
+		qla2x00_nv_deselect(ha);
+
+		/* Wait for NVRAM to become ready. */
+		WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+		RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
+		wait_cnt = NVR_WAIT_CNT;
+		do {
+			if (!--wait_cnt) {
+				ql_dbg(ql_dbg_user, vha, 0x708e,
+				    "NVRAM didn't go ready...\n");
+				break;
+			}
+			NVRAM_DELAY();
+			word = RD_REG_WORD(&reg->nvram);
+		} while ((word & NVR_DATA_IN) == 0);
+
+		if (wait_cnt)
+			ret = QLA_SUCCESS;
+	} else
+		qla2x00_write_nvram_word(ha, ha->nvram_base, wprot_old);
+
+	return ret;
+}
+
+static void
+qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint32_t word, wait_cnt;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	if (stat != QLA_SUCCESS)
+		return;
+
+	/* Set NVRAM write protection. */
+	/* Write enable. */
+	qla2x00_nv_write(ha, NVR_DATA_OUT);
+	qla2x00_nv_write(ha, 0);
+	qla2x00_nv_write(ha, 0);
+	for (word = 0; word < 8; word++)
+		qla2x00_nv_write(ha, NVR_DATA_OUT);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Enable protection register. */
+	qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+	qla2x00_nv_write(ha, NVR_PR_ENABLE);
+	qla2x00_nv_write(ha, NVR_PR_ENABLE);
+	for (word = 0; word < 8; word++)
+		qla2x00_nv_write(ha, NVR_DATA_OUT | NVR_PR_ENABLE);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Enable protection register. */
+	qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+	qla2x00_nv_write(ha, NVR_PR_ENABLE);
+	qla2x00_nv_write(ha, NVR_PR_ENABLE | NVR_DATA_OUT);
+	for (word = 0; word < 8; word++)
+		qla2x00_nv_write(ha, NVR_PR_ENABLE);
+
+	qla2x00_nv_deselect(ha);
+
+	/* Wait for NVRAM to become ready. */
+	WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
+	wait_cnt = NVR_WAIT_CNT;
+	do {
+		if (!--wait_cnt) {
+			ql_dbg(ql_dbg_user, vha, 0x708f,
+			    "NVRAM didn't go ready...\n");
+			break;
+		}
+		NVRAM_DELAY();
+		word = RD_REG_WORD(&reg->nvram);
+	} while ((word & NVR_DATA_IN) == 0);
+}
+
+
+/*****************************************************************************/
+/* Flash Manipulation Routines                                               */
+/*****************************************************************************/
+
+static inline uint32_t
+flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
+{
+	return ha->flash_conf_off | faddr;
+}
+
+static inline uint32_t
+flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
+{
+	return ha->flash_data_off | faddr;
+}
+
+static inline uint32_t
+nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
+{
+	return ha->nvram_conf_off | naddr;
+}
+
+static inline uint32_t
+nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
+{
+	return ha->nvram_data_off | naddr;
+}
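+
+/*
+ * These helpers just OR a chip-specific base offset into a flash
+ * address register (FARX) offset.  For example, the manufacturer-ID
+ * probe below reads configuration offset 0x03ab:
+ *
+ *	ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
+ */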
+
+static uint32_t
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
+{
+	int rval;
+	uint32_t cnt, data;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
+	/* Wait for READ cycle to complete. */
+	rval = QLA_SUCCESS;
+	for (cnt = 3000;
+	    (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) == 0 &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt)
+			udelay(10);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+		cond_resched();
+	}
+
+	/* TODO: What happens if we time out? */
+	data = 0xDEADDEAD;
+	if (rval == QLA_SUCCESS)
+		data = RD_REG_DWORD(&reg->flash_data);
+
+	return data;
+}
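+
+/*
+ * Note on the TODO above: on a timed-out read the sentinel 0xDEADDEAD
+ * is returned; callers that can tolerate a failed read, such as
+ * qla24xx_get_flash_manufacturer() below, test for that value.
+ */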
+
+uint32_t *
+qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+    uint32_t dwords)
+{
+	uint32_t i;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Dword reads to flash. */
+	for (i = 0; i < dwords; i++, faddr++)
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		    flash_data_addr(ha, faddr)));
+
+	return dwptr;
+}
+
+static int
+qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
+{
+	int rval;
+	uint32_t cnt;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	WRT_REG_DWORD(&reg->flash_data, data);
+	RD_REG_DWORD(&reg->flash_data);		/* PCI Posting. */
+	WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
+	/* Wait for Write cycle to complete. */
+	rval = QLA_SUCCESS;
+	for (cnt = 500000; (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) &&
+	    rval == QLA_SUCCESS; cnt--) {
+		if (cnt)
+			udelay(10);
+		else
+			rval = QLA_FUNCTION_TIMEOUT;
+		cond_resched();
+	}
+	return rval;
+}
+
+static void
+qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
+    uint8_t *flash_id)
+{
+	uint32_t ids;
+
+	ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
+	*man_id = LSB(ids);
+	*flash_id = MSB(ids);
+
+	/* Check if man_id and flash_id are valid. */
+	if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) {
+		/* Read information using 0x9f opcode
+		 * Device ID, Mfg ID would be read in the format:
+		 *   <Ext Dev Info><Device ID Part2><Device ID Part 1><Mfg ID>
+		 * Example: ATMEL 0x00 01 45 1F
+		 * Extract MFG and Dev ID from last two bytes.
+		 */
+		ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
+		*man_id = LSB(ids);
+		*flash_id = MSB(ids);
+	}
+}
+
+static int
+qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+{
+	const char *loc, *locations[] = { "DEF", "PCI" };
+	uint32_t pcihdr, pcids;
+	uint32_t *dcode;
+	uint8_t *buf, *bcode, last_image;
+	uint16_t cnt, chksum, *wptr;
+	struct qla_flt_location *fltl;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	/*
+	 * FLT-location structure resides after the last PCI region.
+	 */
+
+	/* Begin with sane defaults. */
+	loc = locations[0];
+	*start = 0;
+	if (IS_QLA24XX_TYPE(ha))
+		*start = FA_FLASH_LAYOUT_ADDR_24;
+	else if (IS_QLA25XX(ha))
+		*start = FA_FLASH_LAYOUT_ADDR;
+	else if (IS_QLA81XX(ha))
+		*start = FA_FLASH_LAYOUT_ADDR_81;
+	else if (IS_P3P_TYPE(ha)) {
+		*start = FA_FLASH_LAYOUT_ADDR_82;
+		goto end;
+	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		*start = FA_FLASH_LAYOUT_ADDR_83;
+		goto end;
+	}
+	/* Begin with first PCI expansion ROM header. */
+	buf = (uint8_t *)req->ring;
+	dcode = (uint32_t *)req->ring;
+	pcihdr = 0;
+	last_image = 1;
+	do {
+		/* Verify PCI expansion ROM header. */
+		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
+		bcode = buf + (pcihdr % 4);
+		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
+			goto end;
+
+		/* Locate PCI data structure. */
+		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+		qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
+		bcode = buf + (pcihdr % 4);
+
+		/* Validate signature of PCI data structure. */
+		if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+		    bcode[0x2] != 'I' || bcode[0x3] != 'R')
+			goto end;
+
+		last_image = bcode[0x15] & BIT_7;
+
+		/* Locate next PCI expansion ROM. */
+		pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+	} while (!last_image);
+
+	/* Now verify FLT-location structure. */
+	fltl = (struct qla_flt_location *)req->ring;
+	qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
+	    sizeof(struct qla_flt_location) >> 2);
+	if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
+	    fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+		goto end;
+
+	wptr = (uint16_t *)req->ring;
+	cnt = sizeof(struct qla_flt_location) >> 1;
+	for (chksum = 0; cnt--; wptr++)
+		chksum += le16_to_cpu(*wptr);
+	if (chksum) {
+		ql_log(ql_log_fatal, vha, 0x0045,
+		    "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
+		    buf, sizeof(struct qla_flt_location));
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Good data.  Use specified location. */
+	loc = locations[1];
+	*start = (le16_to_cpu(fltl->start_hi) << 16 |
+	    le16_to_cpu(fltl->start_lo)) >> 2;
+end:
+	ql_dbg(ql_dbg_init, vha, 0x0046,
+	    "FLTL[%s] = 0x%x.\n",
+	    loc, *start);
+	return QLA_SUCCESS;
+}
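+
+/*
+ * Both the FLT-location structure above and the FLT header below are
+ * guarded by the same convention: the 16-bit little-endian words of the
+ * image, checksum field included, must sum to zero.  A minimal
+ * standalone sketch (hypothetical helper, not part of the driver):
+ *
+ *	static bool csum16_ok(const __le16 *p, size_t words)
+ *	{
+ *		uint16_t sum = 0;
+ *
+ *		while (words--)
+ *			sum += le16_to_cpu(*p++);
+ *		return sum == 0;
+ *	}
+ */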
+
+static void
+qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
+{
+	const char *loc, *locations[] = { "DEF", "FLT" };
+	const uint32_t def_fw[] =
+		{ FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
+	const uint32_t def_boot[] =
+		{ FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
+	const uint32_t def_vpd_nvram[] =
+		{ FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
+	const uint32_t def_vpd0[] =
+		{ 0, 0, FA_VPD0_ADDR_81 };
+	const uint32_t def_vpd1[] =
+		{ 0, 0, FA_VPD1_ADDR_81 };
+	const uint32_t def_nvram0[] =
+		{ 0, 0, FA_NVRAM0_ADDR_81 };
+	const uint32_t def_nvram1[] =
+		{ 0, 0, FA_NVRAM1_ADDR_81 };
+	const uint32_t def_fdt[] =
+		{ FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
+			FA_FLASH_DESCR_ADDR_81 };
+	const uint32_t def_npiv_conf0[] =
+		{ FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR,
+			FA_NPIV_CONF0_ADDR_81 };
+	const uint32_t def_npiv_conf1[] =
+		{ FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
+			FA_NPIV_CONF1_ADDR_81 };
+	const uint32_t fcp_prio_cfg0[] =
+		{ FA_FCP_PRIO0_ADDR, FA_FCP_PRIO0_ADDR_25,
+			0 };
+	const uint32_t fcp_prio_cfg1[] =
+		{ FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25,
+			0 };
+	uint32_t def;
+	uint16_t *wptr;
+	uint16_t cnt, chksum;
+	uint32_t start;
+	struct qla_flt_header *flt;
+	struct qla_flt_region *region;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	def = 0;
+	if (IS_QLA25XX(ha))
+		def = 1;
+	else if (IS_QLA81XX(ha))
+		def = 2;
+
+	/* Assign FCP prio region since older adapters may not have FLT, or
+	 * FCP prio region in its FLT.
+	 */
+	ha->flt_region_fcp_prio = (ha->port_no == 0) ?
+	    fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
+
+	ha->flt_region_flt = flt_addr;
+	wptr = (uint16_t *)req->ring;
+	flt = (struct qla_flt_header *)req->ring;
+	region = (struct qla_flt_region *)&flt[1];
+	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+	    flt_addr << 2, OPTROM_BURST_SIZE);
+	if (*wptr == cpu_to_le16(0xffff))
+		goto no_flash_data;
+	if (flt->version != cpu_to_le16(1)) {
+		ql_log(ql_log_warn, vha, 0x0047,
+		    "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
+		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+		    le16_to_cpu(flt->checksum));
+		goto no_flash_data;
+	}
+
+	cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+	for (chksum = 0; cnt--; wptr++)
+		chksum += le16_to_cpu(*wptr);
+	if (chksum) {
+		ql_log(ql_log_fatal, vha, 0x0048,
+		    "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
+		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+		    le16_to_cpu(flt->checksum));
+		goto no_flash_data;
+	}
+
+	loc = locations[1];
+	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+	for ( ; cnt; cnt--, region++) {
+		/* Store addresses as DWORD offsets. */
+		start = le32_to_cpu(region->start) >> 2;
+		ql_dbg(ql_dbg_init, vha, 0x0049,
+		    "FLT[%02x]: start=0x%x "
+		    "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
+		    start, le32_to_cpu(region->end) >> 2,
+		    le32_to_cpu(region->size));
+
+		switch (le32_to_cpu(region->code) & 0xff) {
+		case FLT_REG_FCOE_FW:
+			if (!IS_QLA8031(ha))
+				break;
+			ha->flt_region_fw = start;
+			break;
+		case FLT_REG_FW:
+			if (IS_QLA8031(ha))
+				break;
+			ha->flt_region_fw = start;
+			break;
+		case FLT_REG_BOOT_CODE:
+			ha->flt_region_boot = start;
+			break;
+		case FLT_REG_VPD_0:
+			if (IS_QLA8031(ha))
+				break;
+			ha->flt_region_vpd_nvram = start;
+			if (IS_P3P_TYPE(ha))
+				break;
+			if (ha->port_no == 0)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_VPD_1:
+			if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+				break;
+			if (ha->port_no == 1)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_VPD_2:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 2)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_VPD_3:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 3)
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_NVRAM_0:
+			if (IS_QLA8031(ha))
+				break;
+			if (ha->port_no == 0)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_NVRAM_1:
+			if (IS_QLA8031(ha))
+				break;
+			if (ha->port_no == 1)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_NVRAM_2:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 2)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_NVRAM_3:
+			if (!IS_QLA27XX(ha))
+				break;
+			if (ha->port_no == 3)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_FDT:
+			ha->flt_region_fdt = start;
+			break;
+		case FLT_REG_NPIV_CONF_0:
+			if (ha->port_no == 0)
+				ha->flt_region_npiv_conf = start;
+			break;
+		case FLT_REG_NPIV_CONF_1:
+			if (ha->port_no == 1)
+				ha->flt_region_npiv_conf = start;
+			break;
+		case FLT_REG_GOLD_FW:
+			ha->flt_region_gold_fw = start;
+			break;
+		case FLT_REG_FCP_PRIO_0:
+			if (ha->port_no == 0)
+				ha->flt_region_fcp_prio = start;
+			break;
+		case FLT_REG_FCP_PRIO_1:
+			if (ha->port_no == 1)
+				ha->flt_region_fcp_prio = start;
+			break;
+		case FLT_REG_BOOT_CODE_82XX:
+			ha->flt_region_boot = start;
+			break;
+		case FLT_REG_BOOT_CODE_8044:
+			if (IS_QLA8044(ha))
+				ha->flt_region_boot = start;
+			break;
+		case FLT_REG_FW_82XX:
+			ha->flt_region_fw = start;
+			break;
+		case FLT_REG_CNA_FW:
+			if (IS_CNA_CAPABLE(ha))
+				ha->flt_region_fw = start;
+			break;
+		case FLT_REG_GOLD_FW_82XX:
+			ha->flt_region_gold_fw = start;
+			break;
+		case FLT_REG_BOOTLOAD_82XX:
+			ha->flt_region_bootload = start;
+			break;
+		case FLT_REG_VPD_8XXX:
+			if (IS_CNA_CAPABLE(ha))
+				ha->flt_region_vpd = start;
+			break;
+		case FLT_REG_FCOE_NVRAM_0:
+			if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
+				break;
+			if (ha->port_no == 0)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_FCOE_NVRAM_1:
+			if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
+				break;
+			if (ha->port_no == 1)
+				ha->flt_region_nvram = start;
+			break;
+		case FLT_REG_IMG_PRI_27XX:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_img_status_pri = start;
+			break;
+		case FLT_REG_IMG_SEC_27XX:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_img_status_sec = start;
+			break;
+		case FLT_REG_FW_SEC_27XX:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_fw_sec = start;
+			break;
+		case FLT_REG_BOOTLOAD_SEC_27XX:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_boot_sec = start;
+			break;
+		case FLT_REG_VPD_SEC_27XX_0:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_vpd_sec = start;
+			break;
+		case FLT_REG_VPD_SEC_27XX_1:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_vpd_sec = start;
+			break;
+		case FLT_REG_VPD_SEC_27XX_2:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_vpd_sec = start;
+			break;
+		case FLT_REG_VPD_SEC_27XX_3:
+			if (IS_QLA27XX(ha))
+				ha->flt_region_vpd_sec = start;
+			break;
+		}
+	}
+	goto done;
+
+no_flash_data:
+	/* Use hardcoded defaults. */
+	loc = locations[0];
+	ha->flt_region_fw = def_fw[def];
+	ha->flt_region_boot = def_boot[def];
+	ha->flt_region_vpd_nvram = def_vpd_nvram[def];
+	ha->flt_region_vpd = (ha->port_no == 0) ?
+	    def_vpd0[def] : def_vpd1[def];
+	ha->flt_region_nvram = (ha->port_no == 0) ?
+	    def_nvram0[def] : def_nvram1[def];
+	ha->flt_region_fdt = def_fdt[def];
+	ha->flt_region_npiv_conf = (ha->port_no == 0) ?
+	    def_npiv_conf0[def] : def_npiv_conf1[def];
+done:
+	ql_dbg(ql_dbg_init, vha, 0x004a,
+	    "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x "
+	    "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
+	    loc, ha->flt_region_boot, ha->flt_region_fw,
+	    ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
+	    ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf,
+	    ha->flt_region_fcp_prio);
+}
+
+static void
+qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
+{
+#define FLASH_BLK_SIZE_4K	0x1000
+#define FLASH_BLK_SIZE_32K	0x8000
+#define FLASH_BLK_SIZE_64K	0x10000
+	const char *loc, *locations[] = { "MID", "FDT" };
+	uint16_t cnt, chksum;
+	uint16_t *wptr;
+	struct qla_fdt_layout *fdt;
+	uint8_t	man_id, flash_id;
+	uint16_t mid = 0, fid = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	wptr = (uint16_t *)req->ring;
+	fdt = (struct qla_fdt_layout *)req->ring;
+	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+	    ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
+	if (*wptr == cpu_to_le16(0xffff))
+		goto no_flash_data;
+	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
+	    fdt->sig[3] != 'D')
+		goto no_flash_data;
+
+	for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
+		chksum += le16_to_cpu(*wptr);
+	if (chksum) {
+		ql_dbg(ql_dbg_init, vha, 0x004c,
+		    "Inconsistent FDT detected:"
+		    " checksum=0x%x id=%c version0x%x.\n", chksum,
+		    fdt->sig[0], le16_to_cpu(fdt->version));
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
+		    (uint8_t *)fdt, sizeof(*fdt));
+		goto no_flash_data;
+	}
+
+	loc = locations[1];
+	mid = le16_to_cpu(fdt->man_id);
+	fid = le16_to_cpu(fdt->id);
+	ha->fdt_wrt_disable = fdt->wrt_disable_bits;
+	ha->fdt_wrt_enable = fdt->wrt_enable_bits;
+	ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd;
+	if (IS_QLA8044(ha))
+		ha->fdt_erase_cmd = fdt->erase_cmd;
+	else
+		ha->fdt_erase_cmd =
+		    flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
+	ha->fdt_block_size = le32_to_cpu(fdt->block_size);
+	if (fdt->unprotect_sec_cmd) {
+		ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
+		    fdt->unprotect_sec_cmd);
+		ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
+		    flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd) :
+		    flash_conf_addr(ha, 0x0336);
+	}
+	goto done;
+no_flash_data:
+	loc = locations[0];
+	if (IS_P3P_TYPE(ha)) {
+		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		goto done;
+	}
+	qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
+	mid = man_id;
+	fid = flash_id;
+	ha->fdt_wrt_disable = 0x9c;
+	ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8);
+	switch (man_id) {
+	case 0xbf: /* STT flash. */
+		if (flash_id == 0x8e)
+			ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		else
+			ha->fdt_block_size = FLASH_BLK_SIZE_32K;
+
+		if (flash_id == 0x80)
+			ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352);
+		break;
+	case 0x13: /* ST M25P80. */
+		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		break;
+	case 0x1f: /* Atmel 26DF081A. */
+		ha->fdt_block_size = FLASH_BLK_SIZE_4K;
+		ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320);
+		ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339);
+		ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336);
+		break;
+	default:
+		/* Default to 64 kb sector size. */
+		ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+		break;
+	}
+done:
+	ql_dbg(ql_dbg_init, vha, 0x004d,
+	    "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+	    "pr=%x wrtd=0x%x blk=0x%x.\n",
+	    loc, mid, fid,
+	    ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
+	    ha->fdt_wrt_disable, ha->fdt_block_size);
+}
+
+static void
+qla2xxx_get_idc_param(scsi_qla_host_t *vha)
+{
+#define QLA82XX_IDC_PARAM_ADDR       0x003e885c
+	uint32_t *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	if (!(IS_P3P_TYPE(ha)))
+		return;
+
+	wptr = (uint32_t *)req->ring;
+	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
+		QLA82XX_IDC_PARAM_ADDR, 8);
+
+	if (*wptr == cpu_to_le32(0xffffffff)) {
+		ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+		ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+	} else {
+		ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr);
+		wptr++;
+		ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
+	}
+	ql_dbg(ql_dbg_init, vha, 0x004e,
+	    "fcoe_dev_init_timeout=%d "
+	    "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout,
+	    ha->fcoe_reset_timeout);
+	return;
+}
+
+int
+qla2xxx_get_flash_info(scsi_qla_host_t *vha)
+{
+	int ret;
+	uint32_t flt_addr;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
+		return QLA_SUCCESS;
+
+	ret = qla2xxx_find_flt_start(vha, &flt_addr);
+	if (ret != QLA_SUCCESS)
+		return ret;
+
+	qla2xxx_get_flt_info(vha, flt_addr);
+	qla2xxx_get_fdt_info(vha);
+	qla2xxx_get_idc_param(vha);
+
+	return QLA_SUCCESS;
+}
+
+void
+qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
+{
+#define NPIV_CONFIG_SIZE	(16*1024)
+	void *data;
+	uint16_t *wptr;
+	uint16_t cnt, chksum;
+	int i;
+	struct qla_npiv_header hdr;
+	struct qla_npiv_entry *entry;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+		return;
+
+	if (ha->flags.nic_core_reset_hdlr_active)
+		return;
+
+	if (IS_QLA8044(ha))
+		return;
+
+	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
+	    ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+	if (hdr.version == cpu_to_le16(0xffff))
+		return;
+	if (hdr.version != cpu_to_le16(1)) {
+		ql_dbg(ql_dbg_user, vha, 0x7090,
+		    "Unsupported NPIV-Config "
+		    "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+		    le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+		    le16_to_cpu(hdr.checksum));
+		return;
+	}
+
+	data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
+	if (!data) {
+		ql_log(ql_log_warn, vha, 0x7091,
+		    "Unable to allocate memory for data.\n");
+		return;
+	}
+
+	ha->isp_ops->read_optrom(vha, (uint8_t *)data,
+	    ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+
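+	/* The checksum spans the header plus all entries, as 16-bit words. */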
+	cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
+	for (wptr = data, chksum = 0; cnt--; wptr++)
+		chksum += le16_to_cpu(*wptr);
+	if (chksum) {
+		ql_dbg(ql_dbg_user, vha, 0x7092,
+		    "Inconsistent NPIV-Config "
+		    "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+		    le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+		    le16_to_cpu(hdr.checksum));
+		goto done;
+	}
+
+	entry = data + sizeof(struct qla_npiv_header);
+	cnt = le16_to_cpu(hdr.entries);
+	for (i = 0; cnt; cnt--, entry++, i++) {
+		uint16_t flags;
+		struct fc_vport_identifiers vid;
+		struct fc_vport *vport;
+
+		memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
+		flags = le16_to_cpu(entry->flags);
+		if (flags == 0xffff)
+			continue;
+		if ((flags & BIT_0) == 0)
+			continue;
+
+		memset(&vid, 0, sizeof(vid));
+		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+		vid.vport_type = FC_PORTTYPE_NPIV;
+		vid.disable = false;
+		vid.port_name = wwn_to_u64(entry->port_name);
+		vid.node_name = wwn_to_u64(entry->node_name);
+
+		ql_dbg(ql_dbg_user, vha, 0x7093,
+		    "NPIV[%02x]: wwpn=%llx "
+		    "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
+		    (unsigned long long)vid.port_name,
+		    (unsigned long long)vid.node_name,
+		    le16_to_cpu(entry->vf_id),
+		    entry->q_qos, entry->f_qos);
+
+		if (i < QLA_PRECONFIG_VPORTS) {
+			vport = fc_vport_create(vha->host, 0, &vid);
+			if (!vport)
+				ql_log(ql_log_warn, vha, 0x7094,
+				    "NPIV-Config Failed to create vport [%02x]: "
+				    "wwpn=%llx wwnn=%llx.\n", cnt,
+				    (unsigned long long)vid.port_name,
+				    (unsigned long long)vid.node_name);
+		}
+	}
+done:
+	kfree(data);
+}
+
+static int
+qla24xx_unprotect_flash(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (ha->flags.fac_supported)
+		return qla81xx_fac_do_write_enable(vha, 1);
+
+	/* Enable flash write. */
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+
+	if (!ha->fdt_wrt_disable)
+		goto done;
+
+	/* Disable flash write-protection, first clear SR protection bit */
+	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
+	/* Then write zero again to clear remaining SR bits. */
+	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
+done:
+	return QLA_SUCCESS;
+}
+
+static int
+qla24xx_protect_flash(scsi_qla_host_t *vha)
+{
+	uint32_t cnt;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (ha->flags.fac_supported)
+		return qla81xx_fac_do_write_enable(vha, 0);
+
+	if (!ha->fdt_wrt_disable)
+		goto skip_wrt_protect;
+
+	/* Enable flash write-protection and wait for completion. */
+	qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
+	    ha->fdt_wrt_disable);
+	for (cnt = 300; cnt &&
+	    qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
+	    cnt--) {
+		udelay(10);
+	}
+
+skip_wrt_protect:
+	/* Disable flash write. */
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+
+	return QLA_SUCCESS;
+}
+
+static int
+qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t start, finish;
+
+	if (ha->flags.fac_supported) {
+		start = fdata >> 2;
+		finish = start + (ha->fdt_block_size >> 2) - 1;
+		return qla81xx_fac_erase_sector(vha, flash_data_addr(ha,
+		    start), flash_data_addr(ha, finish));
+	}
+
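+	/* Byte-swap the 24-bit sector address into the erase-command layout. */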
+	return qla24xx_write_flash_dword(ha, ha->fdt_erase_cmd,
+	    (fdata & 0xff00) | ((fdata << 16) & 0xff0000) |
+	    ((fdata >> 16) & 0xff));
+}
+
+static int
+qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+    uint32_t dwords)
+{
+	int ret;
+	uint32_t liter;
+	uint32_t sec_mask, rest_addr;
+	uint32_t fdata;
+	dma_addr_t optrom_dma;
+	void *optrom = NULL;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Prepare burst-capable write on supported ISPs. */
+	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha)) &&
+	    !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
+		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+		    &optrom_dma, GFP_KERNEL);
+		if (!optrom) {
+			ql_log(ql_log_warn, vha, 0x7095,
+			    "Unable to allocate "
+			    "memory for optrom burst write (%x KB).\n",
+			    OPTROM_BURST_SIZE / 1024);
+		}
+	}
+
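+	/*
+	 * fdt_block_size is a power of two, so rest_addr masks the dword
+	 * offset within a sector and sec_mask isolates the sector base.
+	 */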
+	rest_addr = (ha->fdt_block_size >> 2) - 1;
+	sec_mask = ~rest_addr;
+
+	ret = qla24xx_unprotect_flash(vha);
+	if (ret != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x7096,
+		    "Unable to unprotect flash for update.\n");
+		goto done;
+	}
+
+	for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
+		fdata = (faddr & sec_mask) << 2;
+
+		/* Are we at the beginning of a sector? */
+		if ((faddr & rest_addr) == 0) {
+			/* Do sector unprotect. */
+			if (ha->fdt_unprotect_sec_cmd)
+				qla24xx_write_flash_dword(ha,
+				    ha->fdt_unprotect_sec_cmd,
+				    (fdata & 0xff00) | ((fdata << 16) &
+				    0xff0000) | ((fdata >> 16) & 0xff));
+			ret = qla24xx_erase_sector(vha, fdata);
+			if (ret != QLA_SUCCESS) {
+				ql_dbg(ql_dbg_user, vha, 0x7007,
+				    "Unable to erase erase sector: address=%x.\n",
+				    faddr);
+				break;
+			}
+		}
+
+		/* Go with burst-write. */
+		if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
+			/* Copy data to DMA'ble buffer. */
+			memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
+
+			ret = qla2x00_load_ram(vha, optrom_dma,
+			    flash_data_addr(ha, faddr),
+			    OPTROM_BURST_DWORDS);
+			if (ret != QLA_SUCCESS) {
+				ql_log(ql_log_warn, vha, 0x7097,
+				    "Unable to burst-write optrom segment "
+				    "(%x/%x/%llx).\n", ret,
+				    flash_data_addr(ha, faddr),
+				    (unsigned long long)optrom_dma);
+				ql_log(ql_log_warn, vha, 0x7098,
+				    "Reverting to slow-write.\n");
+
+				dma_free_coherent(&ha->pdev->dev,
+				    OPTROM_BURST_SIZE, optrom, optrom_dma);
+				optrom = NULL;
+			} else {
+				liter += OPTROM_BURST_DWORDS - 1;
+				faddr += OPTROM_BURST_DWORDS - 1;
+				dwptr += OPTROM_BURST_DWORDS - 1;
+				continue;
+			}
+		}
+
+		ret = qla24xx_write_flash_dword(ha,
+		    flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
+		if (ret != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_user, vha, 0x7006,
+			    "Unable to program flash address=%x data=%x.\n",
+			    faddr, *dwptr);
+			break;
+		}
+
+		/* Do sector protect. */
+		if (ha->fdt_unprotect_sec_cmd &&
+		    ((faddr & rest_addr) == rest_addr))
+			qla24xx_write_flash_dword(ha,
+			    ha->fdt_protect_sec_cmd,
+			    (fdata & 0xff00) | ((fdata << 16) &
+			    0xff0000) | ((fdata >> 16) & 0xff));
+	}
+
+	ret = qla24xx_protect_flash(vha);
+	if (ret != QLA_SUCCESS)
+		ql_log(ql_log_warn, vha, 0x7099,
+		    "Unable to protect flash after update.\n");
+done:
+	if (optrom)
+		dma_free_coherent(&ha->pdev->dev,
+		    OPTROM_BURST_SIZE, optrom, optrom_dma);
+
+	return ret;
+}
+
+uint8_t *
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+    uint32_t bytes)
+{
+	uint32_t i;
+	uint16_t *wptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Word reads to NVRAM via registers. */
+	wptr = (uint16_t *)buf;
+	qla2x00_lock_nvram_access(ha);
+	for (i = 0; i < bytes >> 1; i++, naddr++)
+		wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha,
+		    naddr));
+	qla2x00_unlock_nvram_access(ha);
+
+	return buf;
+}
+
+uint8_t *
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+    uint32_t bytes)
+{
+	uint32_t i;
+	uint32_t *dwptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_P3P_TYPE(ha))
+		return buf;
+
+	/* Dword reads to flash. */
+	dwptr = (uint32_t *)buf;
+	for (i = 0; i < bytes >> 2; i++, naddr++)
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		    nvram_data_addr(ha, naddr)));
+
+	return buf;
+}
+
+int
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+    uint32_t bytes)
+{
+	int ret, stat;
+	uint32_t i;
+	uint16_t *wptr;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+
+	ret = QLA_SUCCESS;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qla2x00_lock_nvram_access(ha);
+
+	/* Disable NVRAM write-protection. */
+	stat = qla2x00_clear_nvram_protection(ha);
+
+	wptr = (uint16_t *)buf;
+	for (i = 0; i < bytes >> 1; i++, naddr++) {
+		qla2x00_write_nvram_word(ha, naddr,
+		    cpu_to_le16(*wptr));
+		wptr++;
+	}
+
+	/* Enable NVRAM write-protection. */
+	qla2x00_set_nvram_protection(ha, stat);
+
+	qla2x00_unlock_nvram_access(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return ret;
+}
+
+int
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+    uint32_t bytes)
+{
+	int ret;
+	uint32_t i;
+	uint32_t *dwptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	ret = QLA_SUCCESS;
+
+	if (IS_P3P_TYPE(ha))
+		return ret;
+
+	/* Enable flash write. */
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+
+	/* Disable NVRAM write-protection. */
+	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
+	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
+
+	/* Dword writes to flash. */
+	dwptr = (uint32_t *)buf;
+	for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
+		ret = qla24xx_write_flash_dword(ha,
+		    nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
+		if (ret != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_user, vha, 0x709a,
+			    "Unable to program nvram address=%x data=%x.\n",
+			    naddr, *dwptr);
+			break;
+		}
+	}
+
+	/* Enable NVRAM write-protection. */
+	qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
+
+	/* Disable flash write. */
+	WRT_REG_DWORD(&reg->ctrl_status,
+	    RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+	RD_REG_DWORD(&reg->ctrl_status);	/* PCI Posting. */
+
+	return ret;
+}
+
+uint8_t *
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+    uint32_t bytes)
+{
+	uint32_t i;
+	uint32_t *dwptr;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Dword reads to flash. */
+	dwptr = (uint32_t *)buf;
+	for (i = 0; i < bytes >> 2; i++, naddr++)
+		dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+		    flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
+
+	return buf;
+}
+
+int
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
+    uint32_t bytes)
+{
+	struct qla_hw_data *ha = vha->hw;
+#define RMW_BUFFER_SIZE	(64 * 1024)
+	uint8_t *dbuf;
+
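+	/*
+	 * VPD and NVRAM share a single flash region, so the update is a
+	 * read-modify-write of the whole region.
+	 */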
+	dbuf = vmalloc(RMW_BUFFER_SIZE);
+	if (!dbuf)
+		return QLA_MEMORY_ALLOC_FAILED;
+	ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
+	    RMW_BUFFER_SIZE);
+	memcpy(dbuf + (naddr << 2), buf, bytes);
+	ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
+	    RMW_BUFFER_SIZE);
+	vfree(dbuf);
+
+	return QLA_SUCCESS;
+}
+
+static inline void
+qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
+{
+	if (IS_QLA2322(ha)) {
+		/* Flip all colors. */
+		if (ha->beacon_color_state == QLA_LED_ALL_ON) {
+			/* Turn off. */
+			ha->beacon_color_state = 0;
+			*pflags = GPIO_LED_ALL_OFF;
+		} else {
+			/* Turn on. */
+			ha->beacon_color_state = QLA_LED_ALL_ON;
+			*pflags = GPIO_LED_RGA_ON;
+		}
+	} else {
+		/* Flip green led only. */
+		if (ha->beacon_color_state == QLA_LED_GRN_ON) {
+			/* Turn off. */
+			ha->beacon_color_state = 0;
+			*pflags = GPIO_LED_GREEN_OFF_AMBER_OFF;
+		} else {
+			/* Turn on. */
+			ha->beacon_color_state = QLA_LED_GRN_ON;
+			*pflags = GPIO_LED_GREEN_ON_AMBER_OFF;
+		}
+	}
+}
+
+#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
+
+void
+qla2x00_beacon_blink(struct scsi_qla_host *vha)
+{
+	uint16_t gpio_enable;
+	uint16_t gpio_data;
+	uint16_t led_color = 0;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	if (IS_P3P_TYPE(ha))
+		return;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Save the Original GPIOE. */
+	if (ha->pio_address) {
+		gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
+		gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
+	} else {
+		gpio_enable = RD_REG_WORD(&reg->gpioe);
+		gpio_data = RD_REG_WORD(&reg->gpiod);
+	}
+
+	/* Set the modified gpio_enable values */
+	gpio_enable |= GPIO_LED_MASK;
+
+	if (ha->pio_address) {
+		WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
+	} else {
+		WRT_REG_WORD(&reg->gpioe, gpio_enable);
+		RD_REG_WORD(&reg->gpioe);
+	}
+
+	qla2x00_flip_colors(ha, &led_color);
+
+	/* Clear out any previously set LED color. */
+	gpio_data &= ~GPIO_LED_MASK;
+
+	/* Set the new input LED color to GPIOD. */
+	gpio_data |= led_color;
+
+	/* Set the modified gpio_data values */
+	if (ha->pio_address) {
+		WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
+	} else {
+		WRT_REG_WORD(&reg->gpiod, gpio_data);
+		RD_REG_WORD(&reg->gpiod);
+	}
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int
+qla2x00_beacon_on(struct scsi_qla_host *vha)
+{
+	uint16_t gpio_enable;
+	uint16_t gpio_data;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
+	ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
+
+	if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x709b,
+		    "Unable to update fw options (beacon on).\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	/* Turn off LEDs. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (ha->pio_address) {
+		gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
+		gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
+	} else {
+		gpio_enable = RD_REG_WORD(&reg->gpioe);
+		gpio_data = RD_REG_WORD(&reg->gpiod);
+	}
+	gpio_enable |= GPIO_LED_MASK;
+
+	/* Set the modified gpio_enable values. */
+	if (ha->pio_address) {
+		WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
+	} else {
+		WRT_REG_WORD(&reg->gpioe, gpio_enable);
+		RD_REG_WORD(&reg->gpioe);
+	}
+
+	/* Clear out previously set LED colour. */
+	gpio_data &= ~GPIO_LED_MASK;
+	if (ha->pio_address) {
+		WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
+	} else {
+		WRT_REG_WORD(&reg->gpiod, gpio_data);
+		RD_REG_WORD(&reg->gpiod);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	/*
+	 * Let the per HBA timer kick off the blinking process based on
+	 * the following flags. No need to do anything else now.
+	 */
+	ha->beacon_blink_led = 1;
+	ha->beacon_color_state = 0;
+
+	return QLA_SUCCESS;
+}
+
+int
+qla2x00_beacon_off(struct scsi_qla_host *vha)
+{
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	ha->beacon_blink_led = 0;
+
+	/* Set the on flag so when it gets flipped it will be off. */
+	if (IS_QLA2322(ha))
+		ha->beacon_color_state = QLA_LED_ALL_ON;
+	else
+		ha->beacon_color_state = QLA_LED_GRN_ON;
+
+	ha->isp_ops->beacon_blink(vha);	/* This turns green LED off */
+
+	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
+	ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
+
+	rval = qla2x00_set_fw_options(vha, ha->fw_options);
+	if (rval != QLA_SUCCESS)
+		ql_log(ql_log_warn, vha, 0x709c,
+		    "Unable to update fw options (beacon off).\n");
+	return rval;
+}
+
+
+static inline void
+qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
+{
+	/* Flip all colors. */
+	if (ha->beacon_color_state == QLA_LED_ALL_ON) {
+		/* Turn off. */
+		ha->beacon_color_state = 0;
+		*pflags = 0;
+	} else {
+		/* Turn on. */
+		ha->beacon_color_state = QLA_LED_ALL_ON;
+		*pflags = GPDX_LED_YELLOW_ON | GPDX_LED_AMBER_ON;
+	}
+}
+
+void
+qla24xx_beacon_blink(struct scsi_qla_host *vha)
+{
+	uint16_t led_color = 0;
+	uint32_t gpio_data;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	/* Save the Original GPIOD. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+	/* Enable the gpio_data reg for update. */
+	gpio_data |= GPDX_LED_UPDATE_MASK;
+
+	WRT_REG_DWORD(&reg->gpiod, gpio_data);
+	gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+	/* Set the color bits. */
+	qla24xx_flip_colors(ha, &led_color);
+
+	/* Clear out any previously set LED color. */
+	gpio_data &= ~GPDX_LED_COLOR_MASK;
+
+	/* Set the new input LED color to GPIOD. */
+	gpio_data |= led_color;
+
+	/* Set the modified gpio_data values. */
+	WRT_REG_DWORD(&reg->gpiod, gpio_data);
+	gpio_data = RD_REG_DWORD(&reg->gpiod);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static uint32_t
+qla83xx_select_led_port(struct qla_hw_data *ha)
+{
+	uint32_t led_select_value = 0;
+
+	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+		goto out;
+
+	if (ha->port_no == 0)
+		led_select_value = QLA83XX_LED_PORT0;
+	else
+		led_select_value = QLA83XX_LED_PORT1;
+
+out:
+	return led_select_value;
+}
+
+void
+qla83xx_beacon_blink(struct scsi_qla_host *vha)
+{
+	uint32_t led_select_value;
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t led_cfg[6];
+	uint16_t orig_led_cfg[6];
+	uint32_t led_10_value, led_43_value;
+
+	if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
+		return;
+
+	if (!ha->beacon_blink_led)
+		return;
+
+	if (IS_QLA27XX(ha)) {
+		qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
+		qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
+	} else if (IS_QLA2031(ha)) {
+		led_select_value = qla83xx_select_led_port(ha);
+
+		qla83xx_wr_reg(vha, led_select_value, 0x40000230);
+		qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
+	} else if (IS_QLA8031(ha)) {
+		led_select_value = qla83xx_select_led_port(ha);
+
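+		/* Save both LED registers, force a blink pattern, then restore. */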
+		qla83xx_rd_reg(vha, led_select_value, &led_10_value);
+		qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
+		qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
+		msleep(500);
+		qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
+		msleep(1000);
+		qla83xx_wr_reg(vha, led_select_value, led_10_value);
+		qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
+	} else if (IS_QLA81XX(ha)) {
+		int rval;
+
+		/* Save Current */
+		rval = qla81xx_get_led_config(vha, orig_led_cfg);
+		/* Do the blink */
+		if (rval == QLA_SUCCESS) {
+			if (IS_QLA81XX(ha)) {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0;
+				led_cfg[3] = 0;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0;
+			} else {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x4000;
+				led_cfg[2] = 0x4000;
+				led_cfg[3] = 0x2000;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0x2000;
+			}
+			rval = qla81xx_set_led_config(vha, led_cfg);
+			msleep(1000);
+			if (IS_QLA81XX(ha)) {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0;
+			} else {
+				led_cfg[0] = 0x4000;
+				led_cfg[1] = 0x2000;
+				led_cfg[2] = 0x4000;
+				led_cfg[3] = 0x4000;
+				led_cfg[4] = 0;
+				led_cfg[5] = 0x2000;
+			}
+			rval = qla81xx_set_led_config(vha, led_cfg);
+		}
+		/* On exit, restore original (presumes no status change) */
+		qla81xx_set_led_config(vha, orig_led_cfg);
+	}
+}
+
+int
+qla24xx_beacon_on(struct scsi_qla_host *vha)
+{
+	uint32_t gpio_data;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_P3P_TYPE(ha))
+		return QLA_SUCCESS;
+
+	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+		goto skip_gpio; /* let blink handle it */
+
+	if (ha->beacon_blink_led == 0) {
+		/* Take LED control away from the firmware. */
+		ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
+
+		if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+
+		if (qla2x00_get_fw_options(vha, ha->fw_options) !=
+		    QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x7009,
+			    "Unable to update fw options (beacon on).\n");
+			return QLA_FUNCTION_FAILED;
+		}
+
+		if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+			goto skip_gpio;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+		/* Enable the gpio_data reg for update. */
+		gpio_data |= GPDX_LED_UPDATE_MASK;
+		WRT_REG_DWORD(&reg->gpiod, gpio_data);
+		RD_REG_DWORD(&reg->gpiod);
+
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	}
+
+	/* So all colors blink together. */
+	ha->beacon_color_state = 0;
+
+skip_gpio:
+	/* Let the per HBA timer kick off the blinking process. */
+	ha->beacon_blink_led = 1;
+
+	return QLA_SUCCESS;
+}
+
+int
+qla24xx_beacon_off(struct scsi_qla_host *vha)
+{
+	uint32_t gpio_data;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+	if (IS_P3P_TYPE(ha))
+		return QLA_SUCCESS;
+
+	ha->beacon_blink_led = 0;
+
+	if (IS_QLA2031(ha) || IS_QLA27XX(ha))
+		goto set_fw_options;
+
+	if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+		return QLA_SUCCESS;
+
+	ha->beacon_color_state = QLA_LED_ALL_ON;
+
+	ha->isp_ops->beacon_blink(vha);	/* Will flip to all off. */
+
+	/* Give control back to firmware. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	gpio_data = RD_REG_DWORD(&reg->gpiod);
+
+	/* Disable the gpio_data reg for update. */
+	gpio_data &= ~GPDX_LED_UPDATE_MASK;
+	WRT_REG_DWORD(&reg->gpiod, gpio_data);
+	RD_REG_DWORD(&reg->gpiod);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+set_fw_options:
+	ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
+
+	if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x704d,
+		    "Unable to update fw options (beacon on).\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x704e,
+		    "Unable to update fw options (beacon on).\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	return QLA_SUCCESS;
+}
+
+
+/*
+ * Flash support routines
+ */
+
+/**
+ * qla2x00_flash_enable() - Setup flash for reading and writing.
+ * @ha: HA context
+ */
+static void
+qla2x00_flash_enable(struct qla_hw_data *ha)
+{
+	uint16_t data;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	data = RD_REG_WORD(&reg->ctrl_status);
+	data |= CSR_FLASH_ENABLE;
+	WRT_REG_WORD(&reg->ctrl_status, data);
+	RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+}
+
+/**
+ * qla2x00_flash_disable() - Disable flash and allow RISC to run.
+ * @ha: HA context
+ */
+static void
+qla2x00_flash_disable(struct qla_hw_data *ha)
+{
+	uint16_t data;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	data = RD_REG_WORD(&reg->ctrl_status);
+	data &= ~(CSR_FLASH_ENABLE);
+	WRT_REG_WORD(&reg->ctrl_status, data);
+	RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+}
+
+/**
+ * qla2x00_read_flash_byte() - Reads a byte from flash
+ * @ha: HA context
+ * @addr: Address in flash to read
+ *
+ * A word is read from the chip, but only the lower byte is valid.
+ *
+ * Returns the byte read from flash @addr.
+ */
+static uint8_t
+qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
+{
+	uint16_t data;
+	uint16_t bank_select;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	bank_select = RD_REG_WORD(&reg->ctrl_status);
+
+	if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+		/* Specify 64K address range: */
+		/*  clear out Module Select and Flash Address bits [19:16]. */
+		bank_select &= ~0xf8;
+		bank_select |= addr >> 12 & 0xf0;
+		bank_select |= CSR_FLASH_64K_BANK;
+		WRT_REG_WORD(&reg->ctrl_status, bank_select);
+		RD_REG_WORD(&reg->ctrl_status);	/* PCI Posting. */
+
+		WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+		data = RD_REG_WORD(&reg->flash_data);
+
+		return (uint8_t)data;
+	}
+
+	/* Setup bit 16 of flash address. */
+	if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
+		bank_select |= CSR_FLASH_64K_BANK;
+		WRT_REG_WORD(&reg->ctrl_status, bank_select);
+		RD_REG_WORD(&reg->ctrl_status);	/* PCI Posting. */
+	} else if (((addr & BIT_16) == 0) &&
+	    (bank_select & CSR_FLASH_64K_BANK)) {
+		bank_select &= ~(CSR_FLASH_64K_BANK);
+		WRT_REG_WORD(&reg->ctrl_status, bank_select);
+		RD_REG_WORD(&reg->ctrl_status);	/* PCI Posting. */
+	}
+
+	/* Always perform IO mapped accesses to the FLASH registers. */
+	if (ha->pio_address) {
+		uint16_t data2;
+
+		WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
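+		/* Debounce: reread until two consecutive reads agree. */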
+		do {
+			data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
+			barrier();
+			cpu_relax();
+			data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
+		} while (data != data2);
+	} else {
+		WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+		data = qla2x00_debounce_register(&reg->flash_data);
+	}
+
+	return (uint8_t)data;
+}
+
+/**
+ * qla2x00_write_flash_byte() - Write a byte to flash
+ * @ha: HA context
+ * @addr: Address in flash to write
+ * @data: Data to write
+ */
+static void
+qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
+{
+	uint16_t bank_select;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	bank_select = RD_REG_WORD(&reg->ctrl_status);
+	if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+		/* Specify 64K address range: */
+		/*  clear out Module Select and Flash Address bits [19:16]. */
+		bank_select &= ~0xf8;
+		bank_select |= addr >> 12 & 0xf0;
+		bank_select |= CSR_FLASH_64K_BANK;
+		WRT_REG_WORD(&reg->ctrl_status, bank_select);
+		RD_REG_WORD(&reg->ctrl_status);	/* PCI Posting. */
+
+		WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+		WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+
+		return;
+	}
+
+	/* Setup bit 16 of flash address. */
+	if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
+		bank_select |= CSR_FLASH_64K_BANK;
+		WRT_REG_WORD(&reg->ctrl_status, bank_select);
+		RD_REG_WORD(&reg->ctrl_status);	/* PCI Posting. */
+	} else if (((addr & BIT_16) == 0) &&
+	    (bank_select & CSR_FLASH_64K_BANK)) {
+		bank_select &= ~(CSR_FLASH_64K_BANK);
+		WRT_REG_WORD(&reg->ctrl_status, bank_select);
+		RD_REG_WORD(&reg->ctrl_status);	/* PCI Posting. */
+	}
+
+	/* Always perform IO mapped accesses to the FLASH registers. */
+	if (ha->pio_address) {
+		WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
+		WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
+	} else {
+		WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+		WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
+		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */
+	}
+}
+
+/**
+ * qla2x00_poll_flash() - Polls flash for completion.
+ * @ha: HA context
+ * @addr: Address in flash to poll
+ * @poll_data: Data to be polled
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * This function polls the device until bit 7 of what is read matches data
+ * bit 7 or until data bit 5 becomes a 1.  If that happens, the flash ROM timed
+ * out (a fatal error).  The flash book recommends reading bit 7 again after
+ * reading bit 5 as a 1.
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
+    uint8_t man_id, uint8_t flash_id)
+{
+	int status;
+	uint8_t flash_data;
+	uint32_t cnt;
+
+	status = 1;
+
+	/* Wait for 30 seconds for command to finish. */
+	poll_data &= BIT_7;
+	for (cnt = 3000000; cnt; cnt--) {
+		flash_data = qla2x00_read_flash_byte(ha, addr);
+		if ((flash_data & BIT_7) == poll_data) {
+			status = 0;
+			break;
+		}
+
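+		/*
+		 * Bit 5 (DQ5) set while bit 7 still differs indicates a
+		 * program/erase timeout; Mostel (0x40) and Winbond (0xda)
+		 * parts are excluded from this check.
+		 */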
+		if (man_id != 0x40 && man_id != 0xda) {
+			if ((flash_data & BIT_5) && cnt > 2)
+				cnt = 2;
+		}
+		udelay(10);
+		barrier();
+		cond_resched();
+	}
+	return status;
+}
+
+/**
+ * qla2x00_program_flash_address() - Programs a flash address
+ * @ha: HA context
+ * @addr: Address in flash to program
+ * @data: Data to be written in flash
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
+    uint8_t data, uint8_t man_id, uint8_t flash_id)
+{
+	/* Write Program Command Sequence. */
+	if (IS_OEM_001(ha)) {
+		qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
+		qla2x00_write_flash_byte(ha, 0x555, 0x55);
+		qla2x00_write_flash_byte(ha, 0xaaa, 0xa0);
+		qla2x00_write_flash_byte(ha, addr, data);
+	} else {
+		if (man_id == 0xda && flash_id == 0xc1) {
+			qla2x00_write_flash_byte(ha, addr, data);
+			if (addr & 0x7e)
+				return 0;
+		} else {
+			qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+			qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+			qla2x00_write_flash_byte(ha, 0x5555, 0xa0);
+			qla2x00_write_flash_byte(ha, addr, data);
+		}
+	}
+
+	udelay(150);
+
+	/* Wait for write to complete. */
+	return qla2x00_poll_flash(ha, addr, data, man_id, flash_id);
+}
+
+/**
+ * qla2x00_erase_flash() - Erase the flash.
+ * @ha: HA context
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
+{
+	/* Individual Sector Erase Command Sequence */
+	if (IS_OEM_001(ha)) {
+		qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
+		qla2x00_write_flash_byte(ha, 0x555, 0x55);
+		qla2x00_write_flash_byte(ha, 0xaaa, 0x80);
+		qla2x00_write_flash_byte(ha, 0xaaa, 0xaa);
+		qla2x00_write_flash_byte(ha, 0x555, 0x55);
+		qla2x00_write_flash_byte(ha, 0xaaa, 0x10);
+	} else {
+		qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+		qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+		qla2x00_write_flash_byte(ha, 0x5555, 0x80);
+		qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+		qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+		qla2x00_write_flash_byte(ha, 0x5555, 0x10);
+	}
+
+	udelay(150);
+
+	/* Wait for erase to complete. */
+	return qla2x00_poll_flash(ha, 0x00, 0x80, man_id, flash_id);
+}
+
+/**
+ * qla2x00_erase_flash_sector() - Erase a flash sector.
+ * @ha: HA context
+ * @addr: Flash sector to erase
+ * @sec_mask: Sector address mask
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ *
+ * Returns 0 on success, else non-zero.
+ */
+static int
+qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
+    uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
+{
+	/* Individual Sector Erase Command Sequence */
+	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+	qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+	qla2x00_write_flash_byte(ha, 0x5555, 0x80);
+	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+	qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+	if (man_id == 0x1f && flash_id == 0x13)
+		qla2x00_write_flash_byte(ha, addr & sec_mask, 0x10);
+	else
+		qla2x00_write_flash_byte(ha, addr & sec_mask, 0x30);
+
+	udelay(150);
+
+	/* Wait for erase to complete. */
+	return qla2x00_poll_flash(ha, addr, 0x80, man_id, flash_id);
+}
+
+/**
+ * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
+ * @ha: HA context
+ * @man_id: Flash manufacturer ID
+ * @flash_id: Flash ID
+ */
+static void
+qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
+    uint8_t *flash_id)
+{
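+	/*
+	 * Standard JEDEC autoselect sequence: unlock with 0xaa/0x55, enter
+	 * ID mode (0x90), read the IDs, then reset the part (0xf0).
+	 */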
+	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+	qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+	qla2x00_write_flash_byte(ha, 0x5555, 0x90);
+	*man_id = qla2x00_read_flash_byte(ha, 0x0000);
+	*flash_id = qla2x00_read_flash_byte(ha, 0x0001);
+	qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
+	qla2x00_write_flash_byte(ha, 0x2aaa, 0x55);
+	qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
+}
+
+static void
+qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
+	uint32_t saddr, uint32_t length)
+{
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+	uint32_t midpoint, ilength;
+	uint8_t data;
+
+	midpoint = length / 2;
+
+	WRT_REG_WORD(&reg->nvram, 0);
+	RD_REG_WORD(&reg->nvram);
+	for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
+		if (ilength == midpoint) {
+			WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+			RD_REG_WORD(&reg->nvram);
+		}
+		data = qla2x00_read_flash_byte(ha, saddr);
+		if (saddr % 100)
+			udelay(10);
+		*tmp_buf = data;
+		cond_resched();
+	}
+}
+
+static inline void
+qla2x00_suspend_hba(struct scsi_qla_host *vha)
+{
+	int cnt;
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	ha->isp_ops->disable_intrs(ha);
+	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+	/* Pause RISC. */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+	RD_REG_WORD(&reg->hccr);
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
+		for (cnt = 0; cnt < 30000; cnt++) {
+			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+				break;
+			udelay(100);
+		}
+	} else {
+		udelay(10);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline void
+qla2x00_resume_hba(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Resume HBA. */
+	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	qla2x00_wait_for_chip_reset(vha);
+	scsi_unblock_requests(vha->host);
+}
+
+uint8_t *
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+    uint32_t offset, uint32_t length)
+{
+	uint32_t addr, midpoint;
+	uint8_t *data;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Suspend HBA. */
+	qla2x00_suspend_hba(vha);
+
+	/* Go with read. */
+	midpoint = ha->optrom_size / 2;
+
+	qla2x00_flash_enable(ha);
+	WRT_REG_WORD(&reg->nvram, 0);
+	RD_REG_WORD(&reg->nvram);		/* PCI Posting. */
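+	/* The upper half of the flash is banked in via NVR_SELECT. */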
+	for (addr = offset, data = buf; addr < length; addr++, data++) {
+		if (addr == midpoint) {
+			WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+			RD_REG_WORD(&reg->nvram);	/* PCI Posting. */
+		}
+
+		*data = qla2x00_read_flash_byte(ha, addr);
+	}
+	qla2x00_flash_disable(ha);
+
+	/* Resume HBA. */
+	qla2x00_resume_hba(vha);
+
+	return buf;
+}
+
+int
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+    uint32_t offset, uint32_t length)
+{
+
+	int rval;
+	uint8_t man_id, flash_id, sec_number, data;
+	uint16_t wd;
+	uint32_t addr, liter, sec_mask, rest_addr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+	/* Suspend HBA. */
+	qla2x00_suspend_hba(vha);
+
+	rval = QLA_SUCCESS;
+	sec_number = 0;
+
+	/* Reset ISP chip. */
+	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+	/* Go with write. */
+	qla2x00_flash_enable(ha);
+	do {	/* Loop once to provide quick error exit */
+		/* Structure of flash memory based on manufacturer */
+		if (IS_OEM_001(ha)) {
+			/* OEM variant with special flash part. */
+			man_id = flash_id = 0;
+			rest_addr = 0xffff;
+			sec_mask   = 0x10000;
+			goto update_flash;
+		}
+		qla2x00_get_flash_manufacturer(ha, &man_id, &flash_id);
+		switch (man_id) {
+		case 0x20: /* ST flash. */
+			if (flash_id == 0xd2 || flash_id == 0xe3) {
+				/*
+				 * ST m29w008at part - 64kb sector size with
+				 * 32kb,8kb,8kb,16kb sectors at memory address
+				 * 0xf0000.
+				 */
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			}
+			/*
+			 * ST m29w010b part - 16kb sector size
+			 * Default to 16kb sectors
+			 */
+			rest_addr = 0x3fff;
+			sec_mask = 0x1c000;
+			break;
+		case 0x40: /* Mostel flash. */
+			/* Mostel v29c51001 part - 512 byte sector size. */
+			rest_addr = 0x1ff;
+			sec_mask = 0x1fe00;
+			break;
+		case 0xbf: /* SST flash. */
+			/* SST39sf10 part - 4kb sector size. */
+			rest_addr = 0xfff;
+			sec_mask = 0x1f000;
+			break;
+		case 0xda: /* Winbond flash. */
+			/* Winbond W29EE011 part - 256 byte sector size. */
+			rest_addr = 0x7f;
+			sec_mask = 0x1ff80;
+			break;
+		case 0xc2: /* Macronix flash. */
+			/* 64k sector size. */
+			if (flash_id == 0x38 || flash_id == 0x4f) {
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			}
+			/* Fall through... */
+
+		case 0x1f: /* Atmel flash. */
+			/* 512k sector size. */
+			if (flash_id == 0x13) {
+				rest_addr = 0x7fffffff;
+				sec_mask =   0x80000000;
+				break;
+			}
+			/* Fall through... */
+
+		case 0x01: /* AMD flash. */
+			if (flash_id == 0x38 || flash_id == 0x40 ||
+			    flash_id == 0x4f) {
+				/* Am29LV081 part - 64kb sector size. */
+				/* Am29LV002BT part - 64kb sector size. */
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			} else if (flash_id == 0x3e) {
+				/*
+				 * Am29LV008b part - 64kb sector size with
+				 * 32kb,8kb,8kb,16kb sectors at memory address
+				 * 0xf0000.
+				 */
+				rest_addr = 0xffff;
+				sec_mask = 0x10000;
+				break;
+			} else if (flash_id == 0x20 || flash_id == 0x6e) {
+				/*
+				 * Am29LV010 part or AM29f010 - 16kb sector
+				 * size.
+				 */
+				rest_addr = 0x3fff;
+				sec_mask = 0x1c000;
+				break;
+			} else if (flash_id == 0x6d) {
+				/* Am29LV001 part - 8kb sector size. */
+				rest_addr = 0x1fff;
+				sec_mask = 0x1e000;
+				break;
+			}
+		default:
+			/* Default to 16 kb sector size. */
+			rest_addr = 0x3fff;
+			sec_mask = 0x1c000;
+			break;
+		}
+
+update_flash:
+		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
+			if (qla2x00_erase_flash(ha, man_id, flash_id)) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+		}
+
+		for (addr = offset, liter = 0; liter < length; liter++,
+		    addr++) {
+			data = buf[liter];
+			/* Are we at the beginning of a sector? */
+			if ((addr & rest_addr) == 0) {
+				if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
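+					/*
+					 * Boot-block parts split their top
+					 * 64kb into smaller sectors; adjust
+					 * the masks as each one is crossed.
+					 */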
+					if (addr >= 0x10000UL) {
+						if (((addr >> 12) & 0xf0) &&
+						    ((man_id == 0x01 &&
+							flash_id == 0x3e) ||
+						     (man_id == 0x20 &&
+							 flash_id == 0xd2))) {
+							sec_number++;
+							if (sec_number == 1) {
+								rest_addr =
+								    0x7fff;
+								sec_mask =
+								    0x18000;
+							} else if (
+							    sec_number == 2 ||
+							    sec_number == 3) {
+								rest_addr =
+								    0x1fff;
+								sec_mask =
+								    0x1e000;
+							} else if (
+							    sec_number == 4) {
+								rest_addr =
+								    0x3fff;
+								sec_mask =
+								    0x1c000;
+							}
+						}
+					}
+				} else if (addr == ha->optrom_size / 2) {
+					WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+					RD_REG_WORD(&reg->nvram);
+				}
+
+				if (flash_id == 0xda && man_id == 0xc1) {
+					qla2x00_write_flash_byte(ha, 0x5555,
+					    0xaa);
+					qla2x00_write_flash_byte(ha, 0x2aaa,
+					    0x55);
+					qla2x00_write_flash_byte(ha, 0x5555,
+					    0xa0);
+				} else if (!IS_QLA2322(ha) && !IS_QLA6322(ha)) {
+					/* Then erase it */
+					if (qla2x00_erase_flash_sector(ha,
+					    addr, sec_mask, man_id,
+					    flash_id)) {
+						rval = QLA_FUNCTION_FAILED;
+						break;
+					}
+					if (man_id == 0x01 && flash_id == 0x6d)
+						sec_number++;
+				}
+			}
+
+			if (man_id == 0x01 && flash_id == 0x6d) {
+				if (sec_number == 1 &&
+				    addr == (rest_addr - 1)) {
+					rest_addr = 0x0fff;
+					sec_mask   = 0x1f000;
+				} else if (sec_number == 3 && (addr & 0x7ffe)) {
+					rest_addr = 0x3fff;
+					sec_mask   = 0x1c000;
+				}
+			}
+
+			if (qla2x00_program_flash_address(ha, addr, data,
+			    man_id, flash_id)) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+			cond_resched();
+		}
+	} while (0);
+	qla2x00_flash_disable(ha);
+
+	/* Resume HBA. */
+	qla2x00_resume_hba(vha);
+
+	return rval;
+}
+
+uint8_t *
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+    uint32_t offset, uint32_t length)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+	/* Go with read. */
+	qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
+
+	/* Resume HBA. */
+	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+	scsi_unblock_requests(vha->host);
+
+	return buf;
+}
+
+int
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+    uint32_t offset, uint32_t length)
+{
+	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Suspend HBA. */
+	scsi_block_requests(vha->host);
+	set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+
+	/* Go with write. */
+	rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
+	    length >> 2);
+
+	clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
+	scsi_unblock_requests(vha->host);
+
+	return rval;
+}
+
+uint8_t *
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+    uint32_t offset, uint32_t length)
+{
+	int rval;
+	dma_addr_t optrom_dma;
+	void *optrom;
+	uint8_t *pbuf;
+	uint32_t faddr, left, burst;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha))
+		goto try_fast;
+	if (offset & 0xfff)
+		goto slow_read;
+	if (length < OPTROM_BURST_SIZE)
+		goto slow_read;
+
+try_fast:
+	optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+	    &optrom_dma, GFP_KERNEL);
+	if (!optrom) {
+		ql_log(ql_log_warn, vha, 0x00cc,
+		    "Unable to allocate memory for optrom burst read (%x KB).\n",
+		    OPTROM_BURST_SIZE / 1024);
+		goto slow_read;
+	}
+
+	pbuf = buf;
+	faddr = offset >> 2;
+	left = length >> 2;
+	burst = OPTROM_BURST_DWORDS;
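+	/* Pull the region in DMA bursts; fall back to register reads on error. */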
+	while (left != 0) {
+		if (burst > left)
+			burst = left;
+
+		rval = qla2x00_dump_ram(vha, optrom_dma,
+		    flash_data_addr(ha, faddr), burst);
+		if (rval) {
+			ql_log(ql_log_warn, vha, 0x00f5,
+			    "Unable to burst-read optrom segment (%x/%x/%llx).\n",
+			    rval, flash_data_addr(ha, faddr),
+			    (unsigned long long)optrom_dma);
+			ql_log(ql_log_warn, vha, 0x00f6,
+			    "Reverting to slow-read.\n");
+
+			dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+			    optrom, optrom_dma);
+			goto slow_read;
+		}
+
+		memcpy(pbuf, optrom, burst * 4);
+
+		left -= burst;
+		faddr += burst;
+		pbuf += burst * 4;
+	}
+
+	dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, optrom,
+	    optrom_dma);
+
+	return buf;
+
+slow_read:
+	return qla24xx_read_optrom_data(vha, buf, offset, length);
+}
+
+/**
+ * qla2x00_get_fcode_version() - Determine an FCODE image's version.
+ * @ha: HA context
+ * @pcids: Pointer to the FCODE PCI data structure
+ *
+ * The process of retrieving the FCODE version information is at best
+ * described as interesting.
+ *
+ * Within the first 100h bytes of the image an ASCII string is present
+ * which contains several pieces of information including the FCODE
+ * version.  Unfortunately it seems the only reliable way to retrieve
+ * the version is by scanning for another sentinel within the string,
+ * the FCODE build date:
+ *
+ *	... 2.00.02 10/17/02 ...
+ *
+ * On failure, ha->fcode_revision is left zeroed.
+ */
+static void
+qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
+{
+	int ret = QLA_FUNCTION_FAILED;
+	uint32_t istart, iend, iter, vend;
+	uint8_t do_next, rbyte, *vbyte;
+
+	memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+
+	/* Skip the PCI data structure. */
+	istart = pcids +
+	    ((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) |
+		qla2x00_read_flash_byte(ha, pcids + 0x0A));
+	iend = istart + 0x100;
+	do {
+		/* Scan for the sentinel date string...eeewww. */
+		do_next = 0;
+		iter = istart;
+		while ((iter < iend) && !do_next) {
+			iter++;
+			if (qla2x00_read_flash_byte(ha, iter) == '/') {
+				if (qla2x00_read_flash_byte(ha, iter + 2) ==
+				    '/')
+					do_next++;
+				else if (qla2x00_read_flash_byte(ha,
+				    iter + 3) == '/')
+					do_next++;
+			}
+		}
+		if (!do_next)
+			break;
+
+		/* Backtrack to previous ' ' (space). */
+		do_next = 0;
+		while ((iter > istart) && !do_next) {
+			iter--;
+			if (qla2x00_read_flash_byte(ha, iter) == ' ')
+				do_next++;
+		}
+		if (!do_next)
+			break;
+
+		/*
+		 * Mark end of version tag, and find previous ' ' (space) or
+		 * string length (recent FCODE images -- major hack ahead!!!).
+		 */
+		vend = iter - 1;
+		do_next = 0;
+		while ((iter > istart) && !do_next) {
+			iter--;
+			rbyte = qla2x00_read_flash_byte(ha, iter);
+			if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10)
+				do_next++;
+		}
+		if (!do_next)
+			break;
+
+		/* Mark beginning of version tag, and copy data. */
+		iter++;
+		if ((vend - iter) &&
+		    ((vend - iter) < sizeof(ha->fcode_revision))) {
+			vbyte = ha->fcode_revision;
+			while (iter <= vend) {
+				*vbyte++ = qla2x00_read_flash_byte(ha, iter);
+				iter++;
+			}
+			ret = QLA_SUCCESS;
+		}
+	} while (0);
+
+	if (ret != QLA_SUCCESS)
+		memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+}
+
+int
+qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+	int ret = QLA_SUCCESS;
+	uint8_t code_type, last_image;
+	uint32_t pcihdr, pcids;
+	uint8_t *dbyte;
+	uint16_t *dcode;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->pio_address || !mbuf)
+		return QLA_FUNCTION_FAILED;
+
+	memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+	memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+	memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+	qla2x00_flash_enable(ha);
+
+	/* Begin with first PCI expansion ROM header. */
+	pcihdr = 0;
+	last_image = 1;
+	do {
+		/* Verify PCI expansion ROM header. */
+		if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
+		    qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
+			/* No signature */
+			ql_log(ql_log_fatal, vha, 0x0050,
+			    "No matching ROM signature.\n");
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		/* Locate PCI data structure. */
+		pcids = pcihdr +
+		    ((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) |
+			qla2x00_read_flash_byte(ha, pcihdr + 0x18));
+
+		/* Validate signature of PCI data structure. */
+		if (qla2x00_read_flash_byte(ha, pcids) != 'P' ||
+		    qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' ||
+		    qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
+		    qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
+			/* Incorrect header. */
+			ql_log(ql_log_fatal, vha, 0x0051,
+			    "PCI data struct not found pcir_adr=%x.\n", pcids);
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		/* Read version */
+		code_type = qla2x00_read_flash_byte(ha, pcids + 0x14);
+		switch (code_type) {
+		case ROM_CODE_TYPE_BIOS:
+			/* Intel x86, PC-AT compatible. */
+			ha->bios_revision[0] =
+			    qla2x00_read_flash_byte(ha, pcids + 0x12);
+			ha->bios_revision[1] =
+			    qla2x00_read_flash_byte(ha, pcids + 0x13);
+			ql_dbg(ql_dbg_init, vha, 0x0052,
+			    "Read BIOS %d.%d.\n",
+			    ha->bios_revision[1], ha->bios_revision[0]);
+			break;
+		case ROM_CODE_TYPE_FCODE:
+			/* Open Firmware standard for PCI (FCode). */
+			/* Eeeewww... */
+			qla2x00_get_fcode_version(ha, pcids);
+			break;
+		case ROM_CODE_TYPE_EFI:
+			/* Extensible Firmware Interface (EFI). */
+			ha->efi_revision[0] =
+			    qla2x00_read_flash_byte(ha, pcids + 0x12);
+			ha->efi_revision[1] =
+			    qla2x00_read_flash_byte(ha, pcids + 0x13);
+			ql_dbg(ql_dbg_init, vha, 0x0053,
+			    "Read EFI %d.%d.\n",
+			    ha->efi_revision[1], ha->efi_revision[0]);
+			break;
+		default:
+			ql_log(ql_log_warn, vha, 0x0054,
+			    "Unrecognized code type %x at pcids %x.\n",
+			    code_type, pcids);
+			break;
+		}
+
+		last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7;
+
+		/* Locate next PCI expansion ROM. */
+		pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) |
+		    qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512;
+	} while (!last_image);
+
+	if (IS_QLA2322(ha)) {
+		/* Read firmware image information. */
+		memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+		dbyte = mbuf;
+		memset(dbyte, 0, 8);
+		dcode = (uint16_t *)dbyte;
+
+		qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
+		    8);
+		ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
+		    "Dumping fw "
+		    "ver from flash:.\n");
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
+		    (uint8_t *)dbyte, 8);
+
+		if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
+		    dcode[2] == 0xffff && dcode[3] == 0xffff) ||
+		    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+		    dcode[3] == 0)) {
+			ql_log(ql_log_warn, vha, 0x0057,
+			    "Unrecognized fw revision at %x.\n",
+			    ha->flt_region_fw * 4);
+		} else {
+			/* values are in big endian */
+			ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
+			ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
+			ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
+			ql_dbg(ql_dbg_init, vha, 0x0058,
+			    "FW Version: "
+			    "%d.%d.%d.\n", ha->fw_revision[0],
+			    ha->fw_revision[1], ha->fw_revision[2]);
+		}
+	}
+
+	qla2x00_flash_disable(ha);
+
+	return ret;
+}
+
+int
+qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+	int ret = QLA_SUCCESS;
+	uint32_t pcihdr, pcids;
+	uint32_t *dcode;
+	uint8_t *bcode;
+	uint8_t code_type, last_image;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!mbuf)
+		return QLA_FUNCTION_FAILED;
+
+	memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+	memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+	memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+	dcode = mbuf;
+
+	/* Begin with first PCI expansion ROM header. */
+	pcihdr = ha->flt_region_boot << 2;
+	last_image = 1;
+	do {
+		/* Verify PCI expansion ROM header. */
+		ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
+		    0x20 * 4);
+		bcode = mbuf + (pcihdr % 4);
+		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+			/* No signature */
+			ql_log(ql_log_fatal, vha, 0x0154,
+			    "No matching ROM signature.\n");
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		/* Locate PCI data structure. */
+		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+		ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
+		    0x20 * 4);
+		bcode = mbuf + (pcihdr % 4);
+
+		/* Validate signature of PCI data structure. */
+		if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+		    bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+			/* Incorrect header. */
+			ql_log(ql_log_fatal, vha, 0x0155,
+			    "PCI data struct not found pcir_adr=%x.\n", pcids);
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		/* Read version */
+		code_type = bcode[0x14];
+		switch (code_type) {
+		case ROM_CODE_TYPE_BIOS:
+			/* Intel x86, PC-AT compatible. */
+			ha->bios_revision[0] = bcode[0x12];
+			ha->bios_revision[1] = bcode[0x13];
+			ql_dbg(ql_dbg_init, vha, 0x0156,
+			    "Read BIOS %d.%d.\n",
+			    ha->bios_revision[1], ha->bios_revision[0]);
+			break;
+		case ROM_CODE_TYPE_FCODE:
+			/* Open Firmware standard for PCI (FCode). */
+			ha->fcode_revision[0] = bcode[0x12];
+			ha->fcode_revision[1] = bcode[0x13];
+			ql_dbg(ql_dbg_init, vha, 0x0157,
+			    "Read FCODE %d.%d.\n",
+			    ha->fcode_revision[1], ha->fcode_revision[0]);
+			break;
+		case ROM_CODE_TYPE_EFI:
+			/* Extensible Firmware Interface (EFI). */
+			ha->efi_revision[0] = bcode[0x12];
+			ha->efi_revision[1] = bcode[0x13];
+			ql_dbg(ql_dbg_init, vha, 0x0158,
+			    "Read EFI %d.%d.\n",
+			    ha->efi_revision[1], ha->efi_revision[0]);
+			break;
+		default:
+			ql_log(ql_log_warn, vha, 0x0159,
+			    "Unrecognized code type %x at pcids %x.\n",
+			    code_type, pcids);
+			break;
+		}
+
+		last_image = bcode[0x15] & BIT_7;
+
+		/* Locate next PCI expansion ROM. */
+		pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+	} while (!last_image);
+
+	/* Read firmware image information. */
+	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+	dcode = mbuf;
+	ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
+	    0x20);
+	bcode = mbuf + (pcihdr % 4);
+
+	/* Validate signature of PCI data structure. */
+	if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 &&
+	    bcode[0x2] == 0x40 && bcode[0x3] == 0x40) {
+		ha->fw_revision[0] = bcode[0x4];
+		ha->fw_revision[1] = bcode[0x5];
+		ha->fw_revision[2] = bcode[0x6];
+		ql_dbg(ql_dbg_init, vha, 0x0153,
+		    "Firmware revision %d.%d.%d\n",
+		    ha->fw_revision[0], ha->fw_revision[1],
+		    ha->fw_revision[2]);
+	}
+
+	return ret;
+}
+
+int
+qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+	int ret = QLA_SUCCESS;
+	uint32_t pcihdr, pcids;
+	uint32_t *dcode;
+	uint8_t *bcode;
+	uint8_t code_type, last_image;
+	int i;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t faddr = 0;
+
+	pcihdr = pcids = 0;
+
+	if (IS_P3P_TYPE(ha))
+		return ret;
+
+	if (!mbuf)
+		return QLA_FUNCTION_FAILED;
+
+	memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+	memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+	memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+	dcode = mbuf;
+	pcihdr = ha->flt_region_boot << 2;
+	if (IS_QLA27XX(ha) &&
+	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+		pcihdr = ha->flt_region_boot_sec << 2;
+
+	last_image = 1;
+	do {
+		/* Verify PCI expansion ROM header. */
+		qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
+		bcode = mbuf + (pcihdr % 4);
+		if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+			/* No signature */
+			ql_log(ql_log_fatal, vha, 0x0059,
+			    "No matching ROM signature.\n");
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		/* Locate PCI data structure. */
+		pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+		qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
+		bcode = mbuf + (pcihdr % 4);
+
+		/* Validate signature of PCI data structure. */
+		if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+		    bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+			/* Incorrect header. */
+			ql_log(ql_log_fatal, vha, 0x005a,
+			    "PCI data struct not found pcir_adr=%x.\n", pcids);
+			ret = QLA_FUNCTION_FAILED;
+			break;
+		}
+
+		/* Read version */
+		code_type = bcode[0x14];
+		switch (code_type) {
+		case ROM_CODE_TYPE_BIOS:
+			/* Intel x86, PC-AT compatible. */
+			ha->bios_revision[0] = bcode[0x12];
+			ha->bios_revision[1] = bcode[0x13];
+			ql_dbg(ql_dbg_init, vha, 0x005b,
+			    "Read BIOS %d.%d.\n",
+			    ha->bios_revision[1], ha->bios_revision[0]);
+			break;
+		case ROM_CODE_TYPE_FCODE:
+			/* Open Firmware standard for PCI (FCode). */
+			ha->fcode_revision[0] = bcode[0x12];
+			ha->fcode_revision[1] = bcode[0x13];
+			ql_dbg(ql_dbg_init, vha, 0x005c,
+			    "Read FCODE %d.%d.\n",
+			    ha->fcode_revision[1], ha->fcode_revision[0]);
+			break;
+		case ROM_CODE_TYPE_EFI:
+			/* Extensible Firmware Interface (EFI). */
+			ha->efi_revision[0] = bcode[0x12];
+			ha->efi_revision[1] = bcode[0x13];
+			ql_dbg(ql_dbg_init, vha, 0x005d,
+			    "Read EFI %d.%d.\n",
+			    ha->efi_revision[1], ha->efi_revision[0]);
+			break;
+		default:
+			ql_log(ql_log_warn, vha, 0x005e,
+			    "Unrecognized code type %x at pcids %x.\n",
+			    code_type, pcids);
+			break;
+		}
+
+		last_image = bcode[0x15] & BIT_7;
+
+		/* Locate next PCI expansion ROM. */
+		pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+	} while (!last_image);
+
+	/* Read firmware image information. */
+	memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+	dcode = mbuf;
+	faddr = ha->flt_region_fw;
+	if (IS_QLA27XX(ha) &&
+	    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+		faddr = ha->flt_region_fw_sec;
+
+	qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
+	for (i = 0; i < 4; i++)
+		dcode[i] = be32_to_cpu(dcode[i]);
+
+	if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
+	    dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
+	    (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
+	    dcode[3] == 0)) {
+		ql_log(ql_log_warn, vha, 0x005f,
+		    "Unrecognized fw revision at %x.\n",
+		    ha->flt_region_fw * 4);
+	} else {
+		ha->fw_revision[0] = dcode[0];
+		ha->fw_revision[1] = dcode[1];
+		ha->fw_revision[2] = dcode[2];
+		ha->fw_revision[3] = dcode[3];
+		ql_dbg(ql_dbg_init, vha, 0x0060,
+		    "Firmware revision %d.%d.%d (%x).\n",
+		    ha->fw_revision[0], ha->fw_revision[1],
+		    ha->fw_revision[2], ha->fw_revision[3]);
+	}
+
+	/* Check for golden firmware and get version if available */
+	if (!IS_QLA81XX(ha)) {
+		/* Golden firmware is not present in non-81XX adapters. */
+		return ret;
+	}
+
+	memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
+	dcode = mbuf;
+	ha->isp_ops->read_optrom(vha, (uint8_t *)dcode,
+	    ha->flt_region_gold_fw << 2, 32);
+
+	if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
+	    dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
+		ql_log(ql_log_warn, vha, 0x0056,
+		    "Unrecognized golden fw at 0x%x.\n",
+		    ha->flt_region_gold_fw * 4);
+		return ret;
+	}
+
+	for (i = 4; i < 8; i++)
+		ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]);
+
+	return ret;
+}
+
+static int
+qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
+{
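+	/*
+	 * Walk the standard PCI VPD resource tags: 0x82 is the large-resource
+	 * "Identifier String" tag, 0x90 the read-only "VPD-R" tag, and 0x78
+	 * the small-resource "End Tag" terminating the VPD data.
+	 */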
+	if (pos >= end || *pos != 0x82)
+		return 0;
+
+	pos += 3 + pos[1];
+	if (pos >= end || *pos != 0x90)
+		return 0;
+
+	pos += 3 + pos[1];
+	if (pos >= end || *pos != 0x78)
+		return 0;
+
+	return 1;
+}
+
+int
+qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t *pos = ha->vpd;
+	uint8_t *end = pos + ha->vpd_size;
+	int len = 0;
+
+	if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end))
+		return 0;
+
+	while (pos < end && *pos != 0x78) {
+		len = (*pos == 0x82) ? pos[1] : pos[2];
+
+		if (!strncmp(pos, key, strlen(key)))
+			break;
+
+		if (*pos != 0x90 && *pos != 0x91)
+			pos += len;
+
+		pos += 3;
+	}
+
+	if (pos < end - len && *pos != 0x78)
+		return scnprintf(str, size, "%.*s", len, pos + 3);
+
+	return 0;
+}
+
+int
+qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
+{
+	int len, max_len;
+	uint32_t fcp_prio_addr;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!ha->fcp_prio_cfg) {
+		ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
+		if (!ha->fcp_prio_cfg) {
+			ql_log(ql_log_warn, vha, 0x00d5,
+			    "Unable to allocate memory for fcp priority data (%x).\n",
+			    FCP_PRIO_CFG_SIZE);
+			return QLA_FUNCTION_FAILED;
+		}
+	}
+	memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
+
+	fcp_prio_addr = ha->flt_region_fcp_prio;
+
+	/* first read the fcp priority data header from flash */
+	ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
+			fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
+
+	if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
+		goto fail;
+
+	/* read remaining FCP CMD config data from flash */
+	fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
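+	/* Flash offsets are in 32-bit words, hence the >> 2 conversion. */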
+	len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+	max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
+
+	ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0],
+			fcp_prio_addr << 2, (len < max_len ? len : max_len));
+
+	/* revalidate the entire FCP priority config data, including entries */
+	if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
+		goto fail;
+
+	ha->flags.fcp_prio_enabled = 1;
+	return QLA_SUCCESS;
+fail:
+	vfree(ha->fcp_prio_cfg);
+	ha->fcp_prio_cfg = NULL;
+	return QLA_FUNCTION_FAILED;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_target.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 0000000..21011c5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,7103 @@
+/*
+ *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ *  based on qla2x00t.c code:
+ *
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation, version 2
+ *  of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static int ql2xtgt_tape_enable;
+module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xtgt_tape_enable,
+		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+	"Determines when initiator mode will be enabled. Possible values: "
+	"\"exclusive\" - initiator mode will be enabled on load, "
+	"disabled when target mode is enabled, and re-enabled when "
+	"target mode is disabled; "
+	"\"disabled\" - initiator mode will never be enabled; "
+	"\"dual\" - initiator mode will be enabled; target mode can be "
+	"activated when ready; "
+	"\"enabled\" (default) - initiator mode will always stay enabled.");
+
+static int ql_dm_tgt_ex_pct;
+module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
+	"For Dual Mode (qlini_mode=dual), this parameter determines "
+	"the percentage of exchanges/cmds FW will allocate resources "
+	"for Target mode.");
+
+int ql2xuctrlirq = 1;
+module_param(ql2xuctrlirq, int, 0644);
+MODULE_PARM_DESC(ql2xuctrlirq,
+    "Allow the user to control IRQ placement via smp_affinity. "
+    "Valid with qlini_mode=disabled. "
+    "1 (default): enable.");
+
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
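+/* SAM status used while the target is temporarily unable to accept commands. */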
+static int temp_sam_status = SAM_STAT_BUSY;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+	FCP_TMF_CMPL = 0,
+	FCP_DATA_LEN_INVALID = 1,
+	FCP_CMND_FIELDS_INVALID = 2,
+	FCP_DATA_PARAM_MISMATCH = 3,
+	FCP_TMF_REJECTED = 4,
+	FCP_TMF_FAILED = 5,
+	FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE      0   /* simple task attribute */
+#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
+#define FCP_PTA_ORDERED     2   /* ordered task attribute */
+#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
+#define FCP_PTA_MASK        7   /* mask for task attribute field */
+#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under HW lock and could unlock/lock it inside.
+ * It isn't an issue, since in the current implementation, at the time
+ * those functions are called:
+ *
+ *   - Either context is IRQ and only IRQ handler can modify HW data,
+ *     including rings related fields,
+ *
+ *   - Or access to target mode variables from struct qla_tgt doesn't
+ *     cross those functions boundaries, except tgt_stop, which
+ *     additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+	struct atio_from_isp *pkt, uint8_t);
+static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
+	response_t *pkt);
+static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
+	int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
+	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
+static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status, int qfull);
+static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct qla_qpair *qpair,
+	struct imm_ntfy_from_isp *ntfy,
+	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *imm, int ha_locked);
+static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
+	fc_port_t *fcport, bool local);
+void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+	struct abts_recv_from_24xx *);
+static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
+    uint16_t);
+
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+struct kmem_cache *qla_tgt_plogi_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+static const char *prot_op_str(u32 prot_op)
+{
+	switch (prot_op) {
+	case TARGET_PROT_NORMAL:	return "NORMAL";
+	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
+	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
+	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
+	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
+	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
+	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
+	default:			return "UNKNOWN";
+	}
+}
+
+/* This API intentionally takes dest as a parameter, rather than returning
+ * an int value, to avoid the caller forgetting to issue a wmb() after the
+ * store. */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+	*dest = atomic_inc_return(&base_vha->generation_tick);
+	/* memory barrier */
+	wmb();
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+	/* Send marker if required */
+	if (unlikely(vha->marker_needed != 0)) {
+		int rc = qla2x00_issue_marker(vha, vha_locked);
+		if (rc != QLA_SUCCESS) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+			    "qla_target(%d): issue_marker() failed\n",
+			    vha->vp_idx);
+		}
+		return rc;
+	}
+	return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+	uint8_t *d_id)
+{
+	struct scsi_qla_host *host;
+	uint32_t key = 0;
+
+	if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+	    (vha->d_id.b.al_pa == d_id[2]))
+		return vha;
+
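+	/* Pack the 24-bit FC D_ID (domain:area:al_pa) into a btree key. */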
+	key  = (uint32_t)d_id[0] << 16;
+	key |= (uint32_t)d_id[1] <<  8;
+	key |= (uint32_t)d_id[2];
+
+	host = btree_lookup32(&vha->hw->tgt.host_map, key);
+	if (!host)
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+		    "Unable to find host %06x\n", key);
+
+	return host;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+	uint16_t vp_idx)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->vp_idx == vp_idx)
+		return vha;
+
+	BUG_ON(ha->tgt.tgt_vp_map == NULL);
+	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+		return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+	return NULL;
+}
+
+static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+
+	vha->hw->tgt.num_pend_cmds++;
+	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
+		vha->qla_stats.stat_max_pend_cmds =
+			vha->hw->tgt.num_pend_cmds;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
+static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+	vha->hw->tgt.num_pend_cmds--;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
+static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
+	struct atio_from_isp *atio, uint8_t ha_locked)
+{
+	struct qla_tgt_sess_op *u;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	unsigned long flags;
+
+	if (tgt->tgt_stop) {
+		ql_dbg(ql_dbg_async, vha, 0x502c,
+		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
+		    vha->vp_idx);
+		goto out_term;
+	}
+
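+	/* Can be called with the hardware lock held, so use GFP_ATOMIC. */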
+	u = kzalloc(sizeof(*u), GFP_ATOMIC);
+	if (u == NULL)
+		goto out_term;
+
+	u->vha = vha;
+	memcpy(&u->atio, atio, sizeof(*atio));
+	INIT_LIST_HEAD(&u->cmd_list);
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	schedule_delayed_work(&vha->unknown_atio_work, 1);
+
+out:
+	return;
+
+out_term:
+	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
+	goto out;
+}
+
+static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
+	uint8_t ha_locked)
+{
+	struct qla_tgt_sess_op *u, *t;
+	scsi_qla_host_t *host;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	unsigned long flags;
+	uint8_t queued = 0;
+
+	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
+		if (u->aborted) {
+			ql_dbg(ql_dbg_async, vha, 0x502e,
+			    "Freeing unknown %s %p, because of Abort\n",
+			    "ATIO_TYPE7", u);
+			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
+			    &u->atio, ha_locked, 0);
+			goto abort;
+		}
+
+		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+		if (host != NULL) {
+			ql_dbg(ql_dbg_async, vha, 0x502f,
+			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
+			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
+		} else if (tgt->tgt_stop) {
+			ql_dbg(ql_dbg_async, vha, 0x503a,
+			    "Freeing unknown %s %p, because tgt is being stopped\n",
+			    "ATIO_TYPE7", u);
+			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
+			    &u->atio, ha_locked, 0);
+		} else {
+			ql_dbg(ql_dbg_async, vha, 0x503d,
+			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
+			if (!queued) {
+				queued = 1;
+				schedule_delayed_work(&vha->unknown_atio_work,
+				    1);
+			}
+			continue;
+		}
+
+abort:
+		spin_lock_irqsave(&vha->cmd_list_lock, flags);
+		list_del(&u->cmd_list);
+		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+		kfree(u);
+	}
+}
+
+void qlt_unknown_atio_work_fn(struct work_struct *work)
+{
+	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
+	    struct scsi_qla_host, unknown_atio_work);
+
+	qlt_try_to_dequeue_unknown_atios(vha, 0);
+}
+
+static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint8_t ha_locked)
+{
+	ql_dbg(ql_dbg_tgt, vha, 0xe072,
+		"%s: qla_target(%d): type %x ox_id %04x\n",
+		__func__, vha->vp_idx, atio->u.raw.entry_type,
+		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+
+	switch (atio->u.raw.entry_type) {
+	case ATIO_TYPE7:
+	{
+		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+		    atio->u.isp24.fcp_hdr.d_id);
+		if (unlikely(NULL == host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+			    "qla_target(%d): Received ATIO_TYPE7 "
+			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+			    atio->u.isp24.fcp_hdr.d_id[0],
+			    atio->u.isp24.fcp_hdr.d_id[1],
+			    atio->u.isp24.fcp_hdr.d_id[2]);
+
+			qlt_queue_unknown_atio(vha, atio, ha_locked);
+			break;
+		}
+		if (unlikely(!list_empty(&vha->unknown_atio_list)))
+			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
+
+		qlt_24xx_atio_pkt(host, atio, ha_locked);
+		break;
+	}
+
+	case IMMED_NOTIFY_TYPE:
+	{
+		struct scsi_qla_host *host = vha;
+		struct imm_ntfy_from_isp *entry =
+		    (struct imm_ntfy_from_isp *)atio;
+
+		qlt_issue_marker(vha, ha_locked);
+
+		if ((entry->u.isp24.vp_index != 0xFF) &&
+		    (entry->u.isp24.nport_handle != 0xFFFF)) {
+			host = qlt_find_host_by_vp_idx(vha,
+			    entry->u.isp24.vp_index);
+			if (unlikely(!host)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+				    "qla_target(%d): Received "
+				    "ATIO (IMMED_NOTIFY_TYPE) "
+				    "with unknown vp_index %d\n",
+				    vha->vp_idx, entry->u.isp24.vp_index);
+				break;
+			}
+		}
+		qlt_24xx_atio_pkt(host, atio, ha_locked);
+		break;
+	}
+
+	case VP_RPT_ID_IOCB_TYPE:
+		qla24xx_report_id_acquisition(vha,
+			(struct vp_rpt_id_entry_24xx *)atio);
+		break;
+
+	case ABTS_RECV_24XX:
+	{
+		struct abts_recv_from_24xx *entry =
+			(struct abts_recv_from_24xx *)atio;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+			entry->vp_index);
+		unsigned long flags;
+
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
+			    "received, with unknown vp_index %d\n",
+			    vha->vp_idx, entry->vp_index);
+			break;
+		}
+		if (!ha_locked)
+			spin_lock_irqsave(&host->hw->hardware_lock, flags);
+		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+		if (!ha_locked)
+			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
+		break;
+	}
+
+	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
+
+	default:
+		ql_dbg(ql_dbg_tgt, vha, 0xe040,
+		    "qla_target(%d): Received unknown ATIO atio "
+		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+		break;
+	}
+
+	return false;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, response_t *pkt)
+{
+	switch (pkt->entry_type) {
+	case CTIO_CRC2:
+		ql_dbg(ql_dbg_tgt, vha, 0xe073,
+			"qla_target(%d):%s: CRC2 Response pkt\n",
+			vha->vp_idx, __func__);
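+		/* fall through - CRC2 responses are handled like CTIO_TYPE7 */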
+	case CTIO_TYPE7:
+	{
+		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		    entry->vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe041,
+			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
+			    "received, with unknown vp_index %d\n",
+			    vha->vp_idx, entry->vp_index);
+			break;
+		}
+		qlt_response_pkt(host, rsp, pkt);
+		break;
+	}
+
+	case IMMED_NOTIFY_TYPE:
+	{
+		struct scsi_qla_host *host = vha;
+		struct imm_ntfy_from_isp *entry =
+		    (struct imm_ntfy_from_isp *)pkt;
+
+		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe042,
+			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+			    "received, with unknown vp_index %d\n",
+			    vha->vp_idx, entry->u.isp24.vp_index);
+			break;
+		}
+		qlt_response_pkt(host, rsp, pkt);
+		break;
+	}
+
+	case NOTIFY_ACK_TYPE:
+	{
+		struct scsi_qla_host *host = vha;
+		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+		if (0xFF != entry->u.isp24.vp_index) {
+			host = qlt_find_host_by_vp_idx(vha,
+			    entry->u.isp24.vp_index);
+			if (unlikely(!host)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe043,
+				    "qla_target(%d): Response "
+				    "pkt (NOTIFY_ACK_TYPE) "
+				    "received, with unknown "
+				    "vp_index %d\n", vha->vp_idx,
+				    entry->u.isp24.vp_index);
+				break;
+			}
+		}
+		qlt_response_pkt(host, rsp, pkt);
+		break;
+	}
+
+	case ABTS_RECV_24XX:
+	{
+		struct abts_recv_from_24xx *entry =
+		    (struct abts_recv_from_24xx *)pkt;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		    entry->vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe044,
+			    "qla_target(%d): Response pkt "
+			    "(ABTS_RECV_24XX) received, with unknown "
+			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
+			break;
+		}
+		qlt_response_pkt(host, rsp, pkt);
+		break;
+	}
+
+	case ABTS_RESP_24XX:
+	{
+		struct abts_resp_to_24xx *entry =
+		    (struct abts_resp_to_24xx *)pkt;
+		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+		    entry->vp_index);
+		if (unlikely(!host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe045,
+			    "qla_target(%d): Response pkt "
+			    "(ABTS_RECV_24XX) received, with unknown "
+			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
+			break;
+		}
+		qlt_response_pkt(host, rsp, pkt);
+		break;
+	}
+
+	default:
+		qlt_response_pkt(vha, rsp, pkt);
+		break;
+	}
+}
+
+/*
+ * All qlt_plogi_ack_t operations are protected by hardware_lock
+ */
+static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
+	struct imm_ntfy_from_isp *ntfy, int type)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.nack.fcport = fcport;
+	e->u.nack.type = type;
+	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
+	return qla2x00_post_work(vha, e);
+}
+
+static
+void qla2x00_async_nack_sp_done(void *s, int res)
+{
+	struct srb *sp = (struct srb *)s;
+	struct scsi_qla_host *vha = sp->vha;
+	unsigned long flags;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20f2,
+	    "Async done-%s res %x %8phC type %d\n",
+	    sp->name, res, sp->fcport->port_name, sp->type);
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+
+	switch (sp->type) {
+	case SRB_NACK_PLOGI:
+		sp->fcport->login_gen++;
+		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
+		sp->fcport->logout_on_delete = 1;
+		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
+		sp->fcport->send_els_logo = 0;
+		break;
+
+	case SRB_NACK_PRLI:
+		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+		sp->fcport->deleted = 0;
+		sp->fcport->send_els_logo = 0;
+
+		if (!sp->fcport->login_succ &&
+		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
+			sp->fcport->login_succ = 1;
+
+			vha->fcport_count++;
+
+			if (!IS_IIDMA_CAPABLE(vha->hw) ||
+			    !vha->hw->flags.gpsc_supported) {
+				ql_dbg(ql_dbg_disc, vha, 0x20f3,
+				    "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+				    __func__, __LINE__,
+				    sp->fcport->port_name,
+				    vha->fcport_count);
+
+				qla24xx_post_upd_fcport_work(vha, sp->fcport);
+			} else {
+				ql_dbg(ql_dbg_disc, vha, 0x20f5,
+				    "%s %d %8phC post gpsc fcp_cnt %d\n",
+				    __func__, __LINE__,
+				    sp->fcport->port_name,
+				    vha->fcport_count);
+
+				qla24xx_post_gpsc_work(vha, sp->fcport);
+			}
+		}
+		break;
+
+	case SRB_NACK_LOGO:
+		sp->fcport->login_gen++;
+		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
+		break;
+	}
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	sp->free(sp);
+}
+
+int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
+	struct imm_ntfy_from_isp *ntfy, int type)
+{
+	int rval = QLA_FUNCTION_FAILED;
+	srb_t *sp;
+	char *c = NULL;
+
+	fcport->flags |= FCF_ASYNC_SENT;
+	switch (type) {
+	case SRB_NACK_PLOGI:
+		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+		c = "PLOGI";
+		break;
+	case SRB_NACK_PRLI:
+		fcport->fw_login_state = DSC_LS_PRLI_PEND;
+		fcport->deleted = 0;
+		c = "PRLI";
+		break;
+	case SRB_NACK_LOGO:
+		fcport->fw_login_state = DSC_LS_LOGO_PEND;
+		c = "LOGO";
+		break;
+	}
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+	if (!sp)
+		goto done;
+
+	sp->type = type;
+	sp->name = "nack";
+
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
+	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
+	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+	sp->done = qla2x00_async_nack_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20f4,
+	    "Async-%s %8phC hndl %x %s\n",
+	    sp->name, fcport->port_name, sp->handle, c);
+
+	return rval;
+
+done_free_sp:
+	sp->free(sp);
+done:
+	fcport->flags &= ~FCF_ASYNC_SENT;
+	return rval;
+}
+
+void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+	fc_port_t *t;
+	unsigned long flags;
+
+	switch (e->u.nack.type) {
+	case SRB_NACK_PRLI:
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
+		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
+		if (t) {
+			ql_log(ql_log_info, vha, 0xd034,
+			    "%s create sess success %p", __func__, t);
+			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+			/* create sess has an extra kref */
+			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
+			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		}
+		break;
+	}
+	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
+	    (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
+}
+
+void qla24xx_delete_sess_fn(struct work_struct *work)
+{
+	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
+	struct qla_hw_data *ha = fcport->vha->hw;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+
+	if (fcport->se_sess) {
+		ha->tgt.tgt_ops->shutdown_sess(fcport);
+		ha->tgt.tgt_ops->put_sess(fcport);
+	} else {
+		qlt_unreg_sess(fcport);
+	}
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+/*
+ * Called from qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct fc_port *sess = fcport;
+	unsigned long flags;
+
+	if (!vha->hw->tgt.tgt_ops)
+		return;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	if (tgt->tgt_stop) {
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+		return;
+	}
+
+	if (fcport->disc_state == DSC_DELETE_PEND) {
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+		return;
+	}
+
+	if (!sess->se_sess) {
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+		mutex_lock(&vha->vha_tgt.tgt_mutex);
+		sess = qlt_create_sess(vha, fcport, false);
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	} else {
+		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
+			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+			return;
+		}
+
+		if (!kref_get_unless_zero(&sess->sess_kref)) {
+			ql_dbg(ql_dbg_disc, vha, 0x2107,
+			    "%s: kref_get fail sess %8phC\n",
+			    __func__, sess->port_name);
+			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+			return;
+		}
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+		    "qla_target(%u): %ssession for port %8phC "
+		    "(loop ID %d) reappeared\n", vha->vp_idx,
+		    sess->local ? "local " : "", sess->port_name, sess->loop_id);
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+		    "Reappeared sess %p\n", sess);
+
+		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
+		    fcport->loop_id,
+		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+	}
+
+	if (sess && sess->local) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+		    "qla_target(%u): local session for "
+		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
+		    fcport->port_name, sess->loop_id);
+		sess->local = 0;
+	}
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+/*
+ * This is a zero-based ref-counting solution, since hardware_lock
+ * guarantees that ref_count is not modified concurrently.
+ * Upon successful return the content of iocb is undefined.
+ */
+static struct qlt_plogi_ack_t *
+qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
+		       struct imm_ntfy_from_isp *iocb)
+{
+	struct qlt_plogi_ack_t *pla;
+
+	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
+		if (pla->id.b24 == id->b24) {
+			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
+			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
+			return pla;
+		}
+	}
+
+	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
+	if (!pla) {
+		ql_dbg(ql_dbg_async, vha, 0x5088,
+		       "qla_target(%d): Allocation of plogi_ack failed\n",
+		       vha->vp_idx);
+		return NULL;
+	}
+
+	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
+	pla->id = *id;
+	list_add_tail(&pla->list, &vha->plogi_ack_list);
+
+	return pla;
+}
+
+void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
+    struct qlt_plogi_ack_t *pla)
+{
+	struct imm_ntfy_from_isp *iocb = &pla->iocb;
+	port_id_t port_id;
+	uint16_t loop_id;
+	fc_port_t *fcport = pla->fcport;
+
+	BUG_ON(!pla->ref_count);
+	pla->ref_count--;
+
+	if (pla->ref_count)
+		return;
+
+	ql_dbg(ql_dbg_disc, vha, 0x5089,
+	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
+	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
+	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
+	    iocb->u.isp24.port_id[0],
+	    le16_to_cpu(iocb->u.isp24.nport_handle),
+	    iocb->u.isp24.exchange_address, iocb->ox_id);
+
+	port_id.b.domain = iocb->u.isp24.port_id[2];
+	port_id.b.area   = iocb->u.isp24.port_id[1];
+	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+	fcport->loop_id = loop_id;
+	fcport->d_id = port_id;
+	qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
+			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
+		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
+			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
+	}
+
+	list_del(&pla->list);
+	kmem_cache_free(qla_tgt_plogi_cachep, pla);
+}
+
+void
+qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
+    struct fc_port *sess, enum qlt_plogi_link_t link)
+{
+	struct imm_ntfy_from_isp *iocb = &pla->iocb;
+
+	/* Inc ref_count first because link might already be pointing at pla */
+	pla->ref_count++;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
+		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
+		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
+		sess, link, sess->port_name,
+		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
+		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+		pla->ref_count, pla, link);
+
+	if (sess->plogi_link[link])
+		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
+
+	if (link == QLT_PLOGI_LINK_SAME_WWN)
+		pla->fcport = sess;
+
+	sess->plogi_link[link] = pla;
+}
+
+typedef struct {
+	/* These fields must be initialized by the caller */
+	port_id_t id;
+	/*
+	 * Number of cmds dropped while we were waiting for the
+	 * initiator to ack LOGO. Initialize to 1 if LOGO is
+	 * triggered by a command, otherwise to 0.
+	 */
+	int cmd_count;
+
+	/* These fields are used by the callee */
+	struct list_head list;
+} qlt_port_logo_t;
+
+static void
+qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
+{
+	qlt_port_logo_t *tmp;
+	int res;
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+
+	list_for_each_entry(tmp, &vha->logo_list, list) {
+		if (tmp->id.b24 == logo->id.b24) {
+			tmp->cmd_count += logo->cmd_count;
+			mutex_unlock(&vha->vha_tgt.tgt_mutex);
+			return;
+		}
+	}
+
+	list_add_tail(&logo->list, &vha->logo_list);
+
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	list_del(&logo->list);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
+	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
+	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
+	    logo->cmd_count, res);
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+	struct fc_port *sess = container_of(work, struct fc_port,
+	    free_work);
+	struct qla_tgt *tgt = sess->tgt;
+	struct scsi_qla_host *vha = sess->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	bool logout_started = false;
+	struct event_arg ea;
+	scsi_qla_host_t *base_vha;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
+		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
+		sess->logout_on_delete, sess->keep_nport_handle,
+		sess->send_els_logo);
+
+	if (!IS_SW_RESV_ADDR(sess->d_id)) {
+		if (sess->send_els_logo) {
+			qlt_port_logo_t logo;
+
+			logo.id = sess->d_id;
+			logo.cmd_count = 0;
+			sess->send_els_logo = 0;
+			qlt_send_first_logo(vha, &logo);
+		}
+
+		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
+			int rc;
+
+			rc = qla2x00_post_async_logout_work(vha, sess, NULL);
+			if (rc != QLA_SUCCESS)
+				ql_log(ql_log_warn, vha, 0xf085,
+				    "Schedule logo failed sess %p rc %d\n",
+				    sess, rc);
+			else
+				logout_started = true;
+		}
+	}
+
+	/*
+	 * Release the target session for FC Nexus from fabric module code.
+	 */
+	if (sess->se_sess != NULL)
+		ha->tgt.tgt_ops->free_session(sess);
+
+	if (logout_started) {
+		bool traced = false;
+		u16 cnt = 0;
+
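+		/* Poll for logout, giving up after ~20s (200 * 100ms). */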
+		while (!ACCESS_ONCE(sess->logout_completed)) {
+			if (!traced) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+					"%s: waiting for sess %p logout\n",
+					__func__, sess);
+				traced = true;
+			}
+			msleep(100);
+			cnt++;
+			if (cnt > 200)
+				break;
+		}
+
+		ql_dbg(ql_dbg_disc, vha, 0xf087,
+		    "%s: sess %p logout completed\n", __func__, sess);
+	}
+
+	if (sess->logo_ack_needed) {
+		sess->logo_ack_needed = 0;
+		qla24xx_async_notify_ack(vha, sess,
+			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
+	}
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	if (sess->se_sess) {
+		sess->se_sess = NULL;
+		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
+			tgt->sess_count--;
+	}
+
+	sess->disc_state = DSC_DELETED;
+	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+	sess->deleted = QLA_SESS_DELETED;
+	sess->login_retry = vha->hw->login_retry_count;
+
+	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
+		vha->fcport_count--;
+		sess->login_succ = 0;
+	}
+
+	qla2x00_clear_loop_id(sess);
+
+	if (sess->conflict) {
+		sess->conflict->login_pause = 0;
+		sess->conflict = NULL;
+		if (!test_bit(UNLOADING, &vha->dpc_flags))
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+	}
+
+	{
+		struct qlt_plogi_ack_t *own =
+		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
+		struct qlt_plogi_ack_t *con =
+		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
+		struct imm_ntfy_from_isp *iocb;
+
+		if (con) {
+			iocb = &con->iocb;
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
+				 "se_sess %p / sess %p port %8phC is gone,"
+				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
+				 sess->se_sess, sess, sess->port_name,
+				 own ? "releasing own PLOGI" : "no own PLOGI pending",
+				 own ? own->ref_count : -1,
+				 iocb->u.isp24.port_name, con->ref_count);
+			qlt_plogi_ack_unref(vha, con);
+			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
+		} else {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
+			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
+			    sess->se_sess, sess, sess->port_name,
+			    own ? "releasing own PLOGI" :
+			    "no own PLOGI pending",
+			    own ? own->ref_count : -1);
+		}
+
+		if (own) {
+			sess->fw_login_state = DSC_LS_PLOGI_PEND;
+			qlt_plogi_ack_unref(vha, own);
+			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+		sess, sess->port_name, vha->fcport_count);
+
+	if (tgt && (tgt->sess_count == 0))
+		wake_up_all(&tgt->waitQ);
+
+	if (vha->fcport_count == 0)
+		wake_up_all(&vha->fcport_waitQ);
+
+	base_vha = pci_get_drvdata(ha->pdev);
+	if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
+		return;
+
+	if (!tgt || !tgt->tgt_stop) {
+		memset(&ea, 0, sizeof(ea));
+		ea.event = FCME_DELETE_DONE;
+		ea.fcport = sess;
+		qla2x00_fcport_event_handler(vha, &ea);
+	}
+}
+
+/* ha->tgt.sess_lock is supposed to be held on entry */
+void qlt_unreg_sess(struct fc_port *sess)
+{
+	struct scsi_qla_host *vha = sess->vha;
+
+	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
+	    "%s sess %p for deletion %8phC\n",
+	    __func__, sess, sess->port_name);
+
+	if (sess->se_sess)
+		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+	qla2x00_mark_device_lost(vha, sess, 1, 1);
+
+	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+	sess->disc_state = DSC_DELETE_PEND;
+	sess->last_rscn_gen = sess->rscn_gen;
+	sess->last_login_gen = sess->login_gen;
+
+	if (sess->nvme_flag & NVME_FLAG_REGISTERED)
+		schedule_work(&sess->nvme_del_work);
+
+	INIT_WORK(&sess->free_work, qlt_free_session_done);
+	schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess = NULL;
+	uint16_t loop_id;
+	int res = 0;
+	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+	unsigned long flags;
+
+	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+	if (loop_id == 0xFFFF) {
+		/* Global event */
+		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	} else {
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe000,
+	    "Using sess for qla_tgt_reset: %p\n", sess);
+	if (!sess) {
+		res = -ESRCH;
+		return res;
+	}
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe047,
+	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
+	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
+	    mcmd, loop_id);
+
+	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+static void qla24xx_chk_fcp_state(struct fc_port *sess)
+{
+	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
+		sess->logout_on_delete = 0;
+		sess->logo_ack_needed = 0;
+		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+		sess->scan_state = 0;
+	}
+}
+
+/* ha->tgt.sess_lock is supposed to be held on entry */
+void qlt_schedule_sess_for_deletion(struct fc_port *sess,
+	bool immediate)
+{
+	struct qla_tgt *tgt = sess->tgt;
+
+	if (sess->disc_state == DSC_DELETE_PEND)
+		return;
+
+	if (sess->disc_state == DSC_DELETED) {
+		if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
+			wake_up_all(&tgt->waitQ);
+		if (sess->vha->fcport_count == 0)
+			wake_up_all(&sess->vha->fcport_waitQ);
+
+		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
+			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
+			return;
+	}
+
+	sess->disc_state = DSC_DELETE_PEND;
+
+	if (sess->deleted == QLA_SESS_DELETED)
+		sess->logout_on_delete = 0;
+
+	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+	qla24xx_chk_fcp_state(sess);
+
+	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+	    "Scheduling sess %p for deletion %8phC\n",
+	    sess, sess->port_name);
+
+	queue_work(sess->vha->hw->wq, &sess->del_work);
+}
+
+void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
+{
+	unsigned long flags;
+	struct qla_hw_data *ha = sess->vha->hw;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	qlt_schedule_sess_for_deletion(sess, 1);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+/* ha->tgt.sess_lock is supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt)
+{
+	struct fc_port *sess;
+	scsi_qla_host_t *vha = tgt->vha;
+
+	list_for_each_entry(sess, &vha->vp_fcports, list) {
+		if (sess->se_sess)
+			qlt_schedule_sess_for_deletion(sess, 1);
+	}
+
+	/* At this point tgt could be already dead */
+}
+
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+	uint16_t *loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	dma_addr_t gid_list_dma;
+	struct gid_list_info *gid_list;
+	char *id_iter;
+	int res, rc, i;
+	uint16_t entries;
+
+	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	    &gid_list_dma, GFP_KERNEL);
+	if (!gid_list) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+		    "qla_target(%d): DMA Alloc failed of %u\n",
+		    vha->vp_idx, qla2x00_gid_list_size(ha));
+		return -ENOMEM;
+	}
+
+	/* Get list of logged in devices */
+	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
+	if (rc != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+		    "qla_target(%d): get_id_list() failed: %x\n",
+		    vha->vp_idx, rc);
+		res = -EBUSY;
+		goto out_free_id_list;
+	}
+
+	id_iter = (char *)gid_list;
+	res = -ENOENT;
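+	/* s_id is in wire order: s_id[0]=domain, [1]=area, [2]=al_pa. */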
+	for (i = 0; i < entries; i++) {
+		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+		if ((gid->al_pa == s_id[2]) &&
+		    (gid->area == s_id[1]) &&
+		    (gid->domain == s_id[0])) {
+			*loop_id = le16_to_cpu(gid->loop_id);
+			res = 0;
+			break;
+		}
+		id_iter += ha->gid_list_info_size;
+	}
+
+out_free_id_list:
+	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	    gid_list, gid_list_dma);
+	return res;
+}
+
+/*
+ * Adds an extra ref to allow dropping the hw lock after adding the sess
+ * to the list. The caller must put it.
+ */
+static struct fc_port *qlt_create_sess(
+	struct scsi_qla_host *vha,
+	fc_port_t *fcport,
+	bool local)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess = fcport;
+	unsigned long flags;
+
+	if (vha->vha_tgt.qla_tgt->tgt_stop)
+		return NULL;
+
+	if (fcport->se_sess) {
+		if (!kref_get_unless_zero(&sess->sess_kref)) {
+			ql_dbg(ql_dbg_disc, vha, 0x20f6,
+			    "%s: kref_get_unless_zero failed for %8phC\n",
+			    __func__, sess->port_name);
+			return NULL;
+		}
+		return fcport;
+	}
+	sess->tgt = vha->vha_tgt.qla_tgt;
+	sess->local = local;
+
+	/*
+	 * Under normal circumstances we want to logout from firmware when
+	 * session eventually ends and release corresponding nport handle.
+	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
+	 * code will adjust these flags as necessary.
+	 */
+	sess->logout_on_delete = 1;
+	sess->keep_nport_handle = 0;
+	sess->logout_completed = 0;
+
+	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+	    &fcport->port_name[0], sess) < 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
+		    "(%d) %8phC check_initiator_node_acl failed\n",
+		    vha->vp_idx, fcport->port_name);
+		return NULL;
+	} else {
+		kref_init(&fcport->sess_kref);
+		/*
+		 * Take an extra reference to ->sess_kref here to handle
+		 * fc_port access across ->tgt.sess_lock reacquire.
+		 */
+		if (!kref_get_unless_zero(&sess->sess_kref)) {
+			ql_dbg(ql_dbg_disc, vha, 0x20f7,
+			    "%s: kref_get_unless_zero failed for %8phC\n",
+			    __func__, sess->port_name);
+			return NULL;
+		}
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		if (!IS_SW_RESV_ADDR(sess->d_id))
+			vha->vha_tgt.qla_tgt->sess_count++;
+
+		qlt_do_generation_tick(vha, &sess->generation);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
+	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
+	    vha->vha_tgt.qla_tgt->sess_count);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
+	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
+	    vha->vp_idx, local ? "local " : "", fcport->port_name,
+	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
+	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
+
+	return sess;
+}
+
+/*
+ * max_gen - specifies the maximum session generation
+ * at which this deletion request is still valid.
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
+{
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct fc_port *sess = fcport;
+	unsigned long flags;
+
+	if (!vha->hw->tgt.tgt_ops)
+		return;
+
+	if (!tgt)
+		return;
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	if (tgt->tgt_stop) {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		return;
+	}
+	if (!sess->se_sess) {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		return;
+	}
+
+	if (max_gen - sess->generation < 0) {
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+		    "Ignoring stale deletion request for se_sess %p / sess %p"
+		    " for port %8phC, req_gen %d, sess_gen %d\n",
+		    sess->se_sess, sess, sess->port_name, max_gen,
+		    sess->generation);
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+	sess->local = 1;
+	qlt_schedule_sess_for_deletion(sess, false);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+	struct qla_hw_data *ha = tgt->ha;
+	unsigned long flags;
+	int res;
+	/*
+	 * We need to protect against a race where tgt is freed before or
+	 * inside wake_up().
+	 */
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+	    "tgt %p, sess_count=%d\n",
+	    tgt, tgt->sess_count);
+	res = (tgt->sess_count == 0);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+int qlt_stop_phase1(struct qla_tgt *tgt)
+{
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = tgt->ha;
+	unsigned long flags;
+
+	mutex_lock(&qla_tgt_mutex);
+	if (!vha->fc_vport) {
+		struct Scsi_Host *sh = vha->host;
+		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+		bool npiv_vports;
+
+		spin_lock_irqsave(sh->host_lock, flags);
+		npiv_vports = (fc_host->npiv_vports_inuse);
+		spin_unlock_irqrestore(sh->host_lock, flags);
+
+		if (npiv_vports) {
+			mutex_unlock(&qla_tgt_mutex);
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+			    "NPIV is in use. Cannot stop target\n");
+			return -EPERM;
+		}
+	}
+	if (tgt->tgt_stop || tgt->tgt_stopped) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+		    "Already in tgt->tgt_stop or tgt_stopped state\n");
+		mutex_unlock(&qla_tgt_mutex);
+		return -EPERM;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+	    vha->host_no, vha);
+	/*
+	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+	 * Lock is needed, because we can still get an incoming packet.
+	 */
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	tgt->tgt_stop = 1;
+	qlt_clear_tgt_db(tgt);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+	mutex_unlock(&qla_tgt_mutex);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+	    "Waiting for sess works (tgt %p)", tgt);
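+	/* Flush with the lock dropped so queued sess works can complete. */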
+	spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	while (!list_empty(&tgt->sess_works_list)) {
+		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+		flush_scheduled_work();
+		spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	}
+	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
+
+	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+
+	/* Big hammer */
+	if (!ha->flags.host_shutting_down &&
+	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
+		qlt_disable_vha(vha);
+
+	/* Wait for sessions to clear out (just in case) */
+	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+	return 0;
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+	scsi_qla_host_t *vha = tgt->vha;
+
+	if (tgt->tgt_stopped) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
+		    "Already in tgt->tgt_stopped state\n");
+		dump_stack();
+		return;
+	}
+	if (!tgt->tgt_stop) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
+		    "%s: phase1 stop is not completed\n", __func__);
+		dump_stack();
+		return;
+	}
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	tgt->tgt_stop = 0;
+	tgt->tgt_stopped = 1;
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
+	    tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+static void qlt_release(struct qla_tgt *tgt)
+{
+	scsi_qla_host_t *vha = tgt->vha;
+	void *node;
+	u64 key = 0;
+	u16 i;
+	struct qla_qpair_hint *h;
+	struct qla_hw_data *ha = vha->hw;
+
+	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
+	    !tgt->tgt_stopped)
+		qlt_stop_phase1(tgt);
+
+	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+		qlt_stop_phase2(tgt);
+
+	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
+		unsigned long flags;
+
+		h = &tgt->qphints[i];
+		if (h->qpair) {
+			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
+			list_del(&h->hint_elem);
+			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
+			h->qpair = NULL;
+		}
+	}
+	kfree(tgt->qphints);
+	mutex_lock(&qla_tgt_mutex);
+	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
+	mutex_unlock(&qla_tgt_mutex);
+
+	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
+		btree_remove64(&tgt->lun_qpair_map, key);
+
+	btree_destroy64(&tgt->lun_qpair_map);
+
+	if (vha->vp_idx)
+		if (ha->tgt.tgt_ops &&
+		    ha->tgt.tgt_ops->remove_target &&
+		    vha->vha_tgt.target_lport_ptr)
+			ha->tgt.tgt_ops->remove_target(vha);
+
+	vha->vha_tgt.qla_tgt = NULL;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
+	    "Release of tgt %p finished\n", tgt);
+
+	kfree(tgt);
+}
+
+/* ha->hardware_lock is supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+	const void *param, unsigned int param_size)
+{
+	struct qla_tgt_sess_work_param *prm;
+	unsigned long flags;
+
+	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+	if (!prm) {
+		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+		    "qla_target(%d): Unable to create session "
+		    "work, command will be refused", 0);
+		return -ENOMEM;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+	    "Scheduling work (type %d, prm %p)"
+	    " to find session for param %p (size %d, tgt %p)\n",
+	    type, prm, param, param_size, tgt);
+
+	prm->type = type;
+	memcpy(&prm->tm_iocb, param, param_size);
+
+	spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+	schedule_work(&tgt->sess_work);
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_send_notify_ack(struct qla_qpair *qpair,
+	struct imm_ntfy_from_isp *ntfy,
+	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+	struct scsi_qla_host *vha = qpair->vha;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	struct nack_to_isp *nack;
+
+	if (!ha->flags.fw_started)
+		return;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
+	if (!pkt) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe049,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return;
+	}
+
+	if (vha->vha_tgt.qla_tgt != NULL)
+		vha->vha_tgt.qla_tgt->notify_ack_expected++;
+
+	pkt->entry_type = NOTIFY_ACK_TYPE;
+	pkt->entry_count = 1;
+
+	nack = (struct nack_to_isp *)pkt;
+	nack->ox_id = ntfy->ox_id;
+
+	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
+	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+		nack->u.isp24.flags = ntfy->u.isp24.flags &
+			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+	}
+	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+	nack->u.isp24.status = ntfy->u.isp24.status;
+	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+	nack->u.isp24.srr_reject_code = srr_reject_code;
+	nack->u.isp24.srr_reject_code_expl = srr_explan;
+	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe005,
+	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
+	    vha->vp_idx, nack->u.isp24.status);
+
+	/* Memory Barrier */
+	wmb();
+	qla2x00_start_iocbs(vha, qpair->req);
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
+	struct abts_recv_from_24xx *abts, uint32_t status,
+	bool ids_reversed)
+{
+	struct scsi_qla_host *vha = qpair->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct abts_resp_to_24xx *resp;
+	uint32_t f_ctl;
+	uint8_t *p;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe006,
+	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
+	    ha, abts, status);
+
+	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
+	    NULL);
+	if (!resp) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet", vha->vp_idx, __func__);
+		return;
+	}
+
+	resp->entry_type = ABTS_RESP_24XX;
+	resp->entry_count = 1;
+	resp->nport_handle = abts->nport_handle;
+	resp->vp_index = vha->vp_idx;
+	resp->sof_type = abts->sof_type;
+	resp->exchange_address = abts->exchange_address;
+	resp->fcp_hdr_le = abts->fcp_hdr_le;
+	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+	    F_CTL_SEQ_INITIATIVE);
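+	/* F_CTL is a 24-bit FC header field; copy it a byte at a time. */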
+	p = (uint8_t *)&f_ctl;
+	resp->fcp_hdr_le.f_ctl[0] = *p++;
+	resp->fcp_hdr_le.f_ctl[1] = *p++;
+	resp->fcp_hdr_le.f_ctl[2] = *p;
+	if (ids_reversed) {
+		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+	} else {
+		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+	}
+	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+	if (status == FCP_TMF_CMPL) {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+		resp->payload.ba_acct.low_seq_cnt = 0x0000;
+		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+	} else {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+		resp->payload.ba_rjt.reason_code =
+			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+		/* Other bytes are zero */
+	}
+
+	vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ */
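+/*
+ * Called with the firmware's completion of an ABTS response we generated
+ * earlier: terminate the exchange with a CTIO7 carrying
+ * CTIO7_FLAGS_TERMINATE, then resend a BA_ACC (FCP_TMF_CMPL) with the ID
+ * fields reversed.
+ */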
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+	struct abts_resp_from_24xx_fw *entry)
+{
+	struct ctio7_to_24xx *ctio;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe007,
+	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
+	    vha->hw->base_qpair, NULL);
+	if (ctio == NULL) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return;
+	}
+
+	/*
+	 * On entry we have the firmware's response to the ABTS response we
+	 * generated earlier, so the ID fields in it are reversed.
+	 */
+
+	ctio->entry_type = CTIO_TYPE7;
+	ctio->entry_count = 1;
+	ctio->nport_handle = entry->nport_handle;
+	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->vp_index = vha->vp_idx;
+	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+	ctio->exchange_addr = entry->exchange_addr_to_abort;
+	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+					    CTIO7_FLAGS_TERMINATE);
+	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+
+	/* Memory Barrier */
+	wmb();
+	qla2x00_start_iocbs(vha, vha->req);
+
+	qlt_24xx_send_abts_resp(vha->hw->base_qpair,
+	    (struct abts_recv_from_24xx *)entry,
+	    FCP_TMF_CMPL, true);
+}
+
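+/*
+ * Look up an outstanding command by exchange address across the three
+ * per-vha lists (pending session ops, unknown ATIOs, active commands) and
+ * mark the first match aborted. Returns 1 on a match, 0 otherwise.
+ */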
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+	struct qla_tgt_sess_op *op;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+		if (tag == op->atio.u.isp24.exchange_addr) {
+			op->aborted = true;
+			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+			return 1;
+		}
+	}
+
+	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+		if (tag == op->atio.u.isp24.exchange_addr) {
+			op->aborted = true;
+			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+			return 1;
+		}
+	}
+
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		if (tag == cmd->atio.u.isp24.exchange_addr) {
+			cmd->aborted = 1;
+			spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+			return 1;
+		}
+	}
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	return 0;
+}
+
+/* Drop cmds for the given lun.
+ * XXX only looks for cmds on the port through which the lun reset was received
+ * XXX does not go through the lists of other ports (which may have cmds
+ *     for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+			        u64 lun, uint8_t *s_id)
+{
+	struct qla_tgt_sess_op *op;
+	struct qla_tgt_cmd *cmd;
+	uint32_t key;
+	unsigned long flags;
+
+	key = sid_to_key(s_id);
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+		uint32_t op_key;
+		u64 op_lun;
+
+		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+		op_lun = scsilun_to_int(
+			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+		if (op_key == key && op_lun == lun)
+			op->aborted = true;
+	}
+
+	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+		uint32_t op_key;
+		u64 op_lun;
+
+		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+		op_lun = scsilun_to_int(
+			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+		if (op_key == key && op_lun == lun)
+			op->aborted = true;
+	}
+
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		uint32_t cmd_key;
+		u64 cmd_lun;
+
+		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+		cmd_lun = scsilun_to_int(
+			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+		if (cmd_key == key && cmd_lun == lun)
+			cmd->aborted = 1;
+	}
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
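+/*
+ * If the tag matches a command on one of the vha command lists, mark it
+ * aborted and send the BA_ACC right away. Otherwise allocate a management
+ * command (GFP_ATOMIC, as the hardware lock is held) and pass the abort to
+ * target-core via tgt_ops->handle_tmr().
+ */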
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts, struct fc_port *sess)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	int rc;
+
+	if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+		/* send TASK_ABORT response immediately */
+		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
+		return 0;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+	    "qla_target(%d): task abort (tag=%d)\n",
+	    vha->vp_idx, abts->exchange_addr_to_abort);
+
+	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+	if (mcmd == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
+		    vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+	memset(mcmd, 0, sizeof(*mcmd));
+
+	mcmd->sess = sess;
+	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+	mcmd->reset_count = ha->base_qpair->chip_reset;
+	mcmd->tmr_func = QLA_TGT_ABTS;
+	mcmd->qpair = ha->base_qpair;
+	mcmd->vha = vha;
+
+	/*
+	 * LUN is looked up by target-core internally based on the passed
+	 * abts->exchange_addr_to_abort tag.
+	 */
+	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
+	    abts->exchange_addr_to_abort);
+	if (rc != 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+		    "qla_target(%d):  tgt_ops->handle_tmr()"
+		    " failed: %d", vha->vp_idx, rc);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess;
+	uint32_t tag = abts->exchange_addr_to_abort;
+	uint8_t s_id[3];
+	int rc;
+	unsigned long flags;
+
+	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+		    "qla_target(%d): ABTS: Abort Sequence not "
+		    "supported\n", vha->vp_idx);
+		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+		    false);
+		return;
+	}
+
+	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+		    "qla_target(%d): ABTS: Unknown Exchange "
+		    "Address received\n", vha->vp_idx);
+		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+		    false);
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
+	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+	    le32_to_cpu(abts->fcp_hdr_le.parameter));
+
+	s_id[0] = abts->fcp_hdr_le.s_id[2];
+	s_id[1] = abts->fcp_hdr_le.s_id[1];
+	s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+	if (!sess) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+		    "qla_target(%d): task abort for non-existant session\n",
+		    vha->vp_idx);
+		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
+		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+		if (rc != 0) {
+			qlt_24xx_send_abts_resp(ha->base_qpair, abts,
+			    FCP_TMF_REJECTED, false);
+		}
+		return;
+	}
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	if (sess->deleted) {
+		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+		    false);
+		return;
+	}
+
+	rc = __qlt_24xx_handle_abts(vha, abts, sess);
+	if (rc != 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+		    vha->vp_idx, rc);
+		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+		    false);
+		return;
+	}
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ */
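+/*
+ * Deliver the task management response as a status-mode-1 CTIO7: the
+ * 8-byte FCP response info (resp_code in its first byte) travels in the
+ * sense_data area with SS_RESPONSE_INFO_LEN_VALID set in scsi_status.
+ */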
+static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
+	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+	struct scsi_qla_host *ha = mcmd->vha;
+	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+	struct ctio7_to_24xx *ctio;
+	uint16_t temp;
+
+	ql_dbg(ql_dbg_tgt, ha, 0xe008,
+	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
+	    ha, atio, resp_code);
+
+	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
+	if (ctio == NULL) {
+		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", ha->vp_idx, __func__);
+		return;
+	}
+
+	ctio->entry_type = CTIO_TYPE7;
+	ctio->entry_count = 1;
+	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+	ctio->nport_handle = mcmd->sess->loop_id;
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->vp_index = ha->vp_idx;
+	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio->exchange_addr = atio->u.isp24.exchange_addr;
+	temp = (atio->u.isp24.attr << 9) |
+		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
+	ctio->u.status1.flags = cpu_to_le16(temp);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	ctio->u.status1.ox_id = cpu_to_le16(temp);
+	ctio->u.status1.scsi_status =
+	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+	ctio->u.status1.response_len = cpu_to_le16(8);
+	ctio->u.status1.sense_data[0] = resp_code;
+
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(ha, qpair->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire
+ */
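+/*
+ * Send a status CTIO7 with 18 bytes of fixed-format sense data built in
+ * place: response code 0x70, the sense key at byte 2, additional sense
+ * length 0x0a at byte 7 and ASC/ASCQ at bytes 12/13.
+ */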
+void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
+    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+	struct atio_from_isp *atio = &cmd->atio;
+	struct ctio7_to_24xx *ctio;
+	uint16_t temp;
+	struct scsi_qla_host *vha = cmd->vha;
+
+	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+	    "sense_key=%02x, asc=%02x, ascq=%02x",
+	    vha, atio, scsi_status, sense_key, asc, ascq);
+
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	if (!ctio) {
+		ql_dbg(ql_dbg_async, vha, 0x3067,
+		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
+		    vha->host_no, __func__);
+		goto out;
+	}
+
+	ctio->entry_type = CTIO_TYPE7;
+	ctio->entry_count = 1;
+	ctio->handle = QLA_TGT_SKIP_HANDLE;
+	ctio->nport_handle = cmd->sess->loop_id;
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->vp_index = vha->vp_idx;
+	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio->exchange_addr = atio->u.isp24.exchange_addr;
+	temp = (atio->u.isp24.attr << 9) |
+	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
+	ctio->u.status1.flags = cpu_to_le16(temp);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	ctio->u.status1.ox_id = cpu_to_le16(temp);
+	ctio->u.status1.scsi_status =
+	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+	ctio->u.status1.response_len = cpu_to_le16(18);
+	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+	if (ctio->u.status1.residual != 0)
+		ctio->u.status1.scsi_status |=
+		    cpu_to_le16(SS_RESIDUAL_UNDER);
+
+	/* Fixed format sense data. */
+	ctio->u.status1.sense_data[0] = 0x70;
+	ctio->u.status1.sense_data[2] = sense_key;
+	/* Additional sense length */
+	ctio->u.status1.sense_data[7] = 0xa;
+	/* ASC and ASCQ */
+	ctio->u.status1.sense_data[12] = asc;
+	ctio->u.status1.sense_data[13] = ascq;
+
+	/* Memory Barrier */
+	wmb();
+
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+
+out:
+	return;
+}
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	struct scsi_qla_host *vha = mcmd->sess->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	struct qla_qpair *qpair = mcmd->qpair;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+	    "TM response mcmd (%p) status %#x state %#x",
+	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+
+	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
+		/*
+		 * Either the port is not online or this request was from
+		 * previous life, just abort the processing.
+		 */
+		ql_dbg(ql_dbg_async, vha, 0xe100,
+			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
+			vha->flags.online, qla2x00_reset_active(vha),
+			mcmd->reset_count, qpair->chip_reset);
+		ha->tgt.tgt_ops->free_mcmd(mcmd);
+		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+		return;
+	}
+
+	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
+		if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+		    ELS_LOGO ||
+		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+		    ELS_PRLO ||
+		    mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+		    ELS_TPRLO) {
+			ql_dbg(ql_dbg_disc, vha, 0x2106,
+			    "TM response logo %8phC status %#x state %#x",
+			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
+			    mcmd->flags);
+			qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+		} else {
+			qlt_send_notify_ack(vha->hw->base_qpair,
+			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
+		}
+	} else {
+		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
+			qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
+			    mcmd->fc_tm_rsp, false);
+		else
+			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
+			    mcmd->fc_tm_rsp);
+	}
+	/*
+	 * Make the callback for ->free_mcmd() to queue_work() and invoke
+	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
+	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+	 * qlt_xmit_tm_rsp() returns here.
+	 */
+	ha->tgt.tgt_ops->free_mcmd(mcmd);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
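+/*
+ * DMA-map the command's scatterlist and work out how many request-ring
+ * entries the CTIO will need: continuation IOCBs once the segment count
+ * exceeds what fits in the command entry, plus the DIF DSD totals
+ * (tot_dsds) when protection information is attached.
+ */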
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+	struct qla_tgt_cmd *cmd = prm->cmd;
+
+	BUG_ON(cmd->sg_cnt == 0);
+
+	prm->sg = (struct scatterlist *)cmd->sg;
+	prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+	    cmd->sg_cnt, cmd->dma_data_direction);
+	if (unlikely(prm->seg_cnt == 0))
+		goto out_err;
+
+	prm->cmd->sg_mapped = 1;
+
+	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
+		/*
+		 * If greater than four sg entries then we need to allocate
+		 * the continuation entries
+		 */
+		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
+			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+			QLA_TGT_DATASEGS_PER_CMD_24XX,
+			QLA_TGT_DATASEGS_PER_CONT_24XX);
+	} else {
+		/* DIF */
+		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
+			prm->tot_dsds = prm->seg_cnt;
+		} else
+			prm->tot_dsds = prm->seg_cnt;
+
+		if (cmd->prot_sg_cnt) {
+			prm->prot_sg      = cmd->prot_sg;
+			prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+				cmd->prot_sg, cmd->prot_sg_cnt,
+				cmd->dma_data_direction);
+			if (unlikely(prm->prot_seg_cnt == 0))
+				goto out_err;
+
+			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+				/* DIF bundling is not supported here */
+				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
+								cmd->blk_sz);
+				prm->tot_dsds += prm->prot_seg_cnt;
+			} else
+				prm->tot_dsds += prm->prot_seg_cnt;
+		}
+	}
+
+	return 0;
+
+out_err:
+	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
+	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+	    0, prm->cmd->sg_cnt);
+	return -1;
+}
+
+static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+	struct qla_hw_data *ha;
+	struct qla_qpair *qpair;
+
+	if (!cmd->sg_mapped)
+		return;
+
+	qpair = cmd->qpair;
+
+	pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+	    cmd->dma_data_direction);
+	cmd->sg_mapped = 0;
+
+	if (cmd->prot_sg_cnt)
+		pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+			cmd->dma_data_direction);
+
+	if (!cmd->ctx)
+		return;
+	ha = vha->hw;
+	if (cmd->ctx_dsd_alloced)
+		qla2x00_clean_dsd_pool(ha, cmd->ctx);
+
+	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
+}
+
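+/*
+ * Reserve ring space for req_cnt IOCBs. If the cached free count looks
+ * short, re-read the firmware's out-pointer (shadow copy or register),
+ * recompute the free distance on the ring and fail with -EAGAIN when
+ * fewer than req_cnt + 2 entries remain.
+ */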
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
+	uint32_t req_cnt)
+{
+	uint32_t cnt;
+	struct req_que *req = qpair->req;
+
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out));
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length - (req->ring_index - cnt);
+
+		if (unlikely(req->cnt < (req_cnt + 2)))
+			return -EAGAIN;
+	}
+
+	req->cnt -= req_cnt;
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ */
+static inline void *qlt_get_req_pkt(struct req_que *req)
+{
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+	return (cont_entry_t *)req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
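+/*
+ * Find a free slot in req->outstanding_cmds[]: the search wraps past
+ * slot 0 and skips the reserved QLA_TGT_SKIP_HANDLE value; returns
+ * QLA_TGT_NULL_HANDLE when every slot is in use.
+ */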
+static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
+{
+	uint32_t h;
+	int index;
+	uint8_t found = 0;
+	struct req_que *req = qpair->req;
+
+	h = req->current_outstanding_cmd;
+
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		h++;
+		if (h == req->num_outstanding_cmds)
+			h = 1;
+
+		if (h == QLA_TGT_SKIP_HANDLE)
+			continue;
+
+		if (!req->outstanding_cmds[h]) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found) {
+		req->current_outstanding_cmd = h;
+	} else {
+		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
+		    "qla_target(%d): Ran out of empty cmd slots\n",
+		    qpair->vha->vp_idx);
+		h = QLA_TGT_NULL_HANDLE;
+	}
+
+	return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
+	struct qla_tgt_prm *prm)
+{
+	uint32_t h;
+	struct ctio7_to_24xx *pkt;
+	struct atio_from_isp *atio = &prm->cmd->atio;
+	uint16_t temp;
+
+	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
+	prm->pkt = pkt;
+	memset(pkt, 0, sizeof(*pkt));
+
+	pkt->entry_type = CTIO_TYPE7;
+	pkt->entry_count = (uint8_t)prm->req_cnt;
+	pkt->vp_index = prm->cmd->vp_idx;
+
+	h = qlt_make_handle(qpair);
+	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+		/*
+		 * CTIO type 7 from the firmware doesn't provide a way to
+		 * know the initiator's LOOP ID, hence we can't find
+		 * the session and, so, the command.
+		 */
+		return -EAGAIN;
+	} else
+		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
+
+	pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
+	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
+	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	pkt->exchange_addr = atio->u.isp24.exchange_addr;
+	temp = atio->u.isp24.attr << 9;
+	pkt->u.status0.flags |= cpu_to_le16(temp);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	pkt->u.status0.ox_id = cpu_to_le16(temp);
+	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+	return 0;
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. We have already made
+ * sure that there is a sufficient number of request entries, so the lock
+ * need not be dropped.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
+{
+	int cnt;
+	uint32_t *dword_ptr;
+
+	/* Build continuation packets */
+	while (prm->seg_cnt > 0) {
+		cont_a64_entry_t *cont_pkt64 =
+			(cont_a64_entry_t *)qlt_get_req_pkt(
+			   prm->cmd->qpair->req);
+
+		/*
+		 * Make sure that none of the 64-bit specific fields of
+		 * cont_pkt64 are used for 32-bit addressing; cast to
+		 * (cont_entry_t *) for that.
+		 */
+
+		memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+		cont_pkt64->entry_count = 1;
+		cont_pkt64->sys_define = 0;
+
+		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+		dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
+
+		/* Load continuation entry data segments */
+		for (cnt = 0;
+		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
+		    cnt++, prm->seg_cnt--) {
+			*dword_ptr++ =
+			    cpu_to_le32(pci_dma_lo32
+				(sg_dma_address(prm->sg)));
+			*dword_ptr++ = cpu_to_le32(pci_dma_hi32
+			    (sg_dma_address(prm->sg)));
+			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+			prm->sg = sg_next(prm->sg);
+		}
+	}
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. We have already made
+ * sure that there is a sufficient number of request entries, so the lock
+ * need not be dropped.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm)
+{
+	int cnt;
+	uint32_t *dword_ptr;
+	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+	/* Setup packet address segment pointer */
+	dword_ptr = pkt24->u.status0.dseg_0_address;
+
+	/* Set total data segment count */
+	if (prm->seg_cnt)
+		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+	if (prm->seg_cnt == 0) {
+		/* No data transfer */
+		*dword_ptr++ = 0;
+		*dword_ptr = 0;
+		return;
+	}
+
+	/* If scatter gather */
+
+	/* Load command entry data segments */
+	for (cnt = 0;
+	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
+	    cnt++, prm->seg_cnt--) {
+		*dword_ptr++ =
+		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+
+		*dword_ptr++ = cpu_to_le32(pci_dma_hi32(
+			sg_dma_address(prm->sg)));
+
+		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+		prm->sg = sg_next(prm->sg);
+	}
+
+	qlt_load_cont_data_segments(prm);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+	return cmd->bufflen > 0;
+}
+
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+	struct qla_tgt_cmd *cmd;
+	struct scsi_qla_host *vha;
+
+	/* asc 0x10=dif error */
+	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+		cmd = prm->cmd;
+		vha = cmd->vha;
+		/* ASCQ */
+		switch (prm->sense_buffer[13]) {
+		case 1:
+			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
+			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+			    "se_cmd=%p tag[%x]",
+			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+			    cmd->atio.u.isp24.exchange_addr);
+			break;
+		case 2:
+			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
+			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+			    "se_cmd=%p tag[%x]",
+			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+			    cmd->atio.u.isp24.exchange_addr);
+			break;
+		case 3:
+			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
+			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+			    "se_cmd=%p tag[%x]",
+			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+			    cmd->atio.u.isp24.exchange_addr);
+			break;
+		default:
+			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
+			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+			    "se_cmd=%p tag[%x]",
+			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+			    cmd->atio.u.isp24.exchange_addr);
+			break;
+		}
+		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
+	}
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
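+/*
+ * Fill the transmit parameter block: map data if this transmission carries
+ * it, fold residual under/overflow into the SCSI status, and bump
+ * *full_req_cnt when status must travel in an extra CTIO because sense
+ * data or a non-GOOD result accompanies a data transfer.
+ */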
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+	uint32_t *full_req_cnt)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct qla_qpair *qpair = cmd->qpair;
+
+	prm->cmd = cmd;
+	prm->tgt = cmd->tgt;
+	prm->pkt = NULL;
+	prm->rq_result = scsi_status;
+	prm->sense_buffer = &cmd->sense_buffer[0];
+	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+	prm->sg = NULL;
+	prm->seg_cnt = -1;
+	prm->req_cnt = 1;
+	prm->residual = 0;
+	prm->add_status_pkt = 0;
+	prm->prot_sg = NULL;
+	prm->prot_seg_cnt = 0;
+	prm->tot_dsds = 0;
+
+	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+		if  (qlt_pci_map_calc_cnt(prm) != 0)
+			return -EAGAIN;
+	}
+
+	*full_req_cnt = prm->req_cnt;
+
+	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		prm->residual = se_cmd->residual_count;
+		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
+		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
+		       prm->residual, se_cmd->tag,
+		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+		       cmd->bufflen, prm->rq_result);
+		prm->rq_result |= SS_RESIDUAL_UNDER;
+	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+		prm->residual = se_cmd->residual_count;
+		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
+		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
+		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
+		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
+		prm->rq_result |= SS_RESIDUAL_OVER;
+	}
+
+	if (xmit_type & QLA_TGT_XMIT_STATUS) {
+		/*
+		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+		 * ignored in *xmit_response() below
+		 */
+		if (qlt_has_data(cmd)) {
+			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
+			    (prm->rq_result != 0))) {
+				prm->add_status_pkt = 1;
+				(*full_req_cnt)++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
+    int sending_sense)
+{
+	if (cmd->qpair->enable_class_2)
+		return 0;
+
+	if (sending_sense)
+		return cmd->conf_compl_supported;
+	else
+		return cmd->qpair->enable_explicit_conf &&
+		    cmd->conf_compl_supported;
+}
+
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+	struct qla_tgt_prm *prm)
+{
+	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+	    (uint32_t)sizeof(ctio->u.status1.sense_data));
+	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+	if (qlt_need_explicit_conf(prm->cmd, 0)) {
+		ctio->u.status0.flags |= cpu_to_le16(
+		    CTIO7_FLAGS_EXPLICIT_CONFORM |
+		    CTIO7_FLAGS_CONFORM_REQ);
+	}
+	ctio->u.status0.residual = cpu_to_le32(prm->residual);
+	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+		int i;
+
+		if (qlt_need_explicit_conf(prm->cmd, 1)) {
+			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
+				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
+				    "Skipping EXPLICIT_CONFORM and "
+				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+				    "non GOOD status\n");
+				goto skip_explicit_conf;
+			}
+			ctio->u.status1.flags |= cpu_to_le16(
+			    CTIO7_FLAGS_EXPLICIT_CONFORM |
+			    CTIO7_FLAGS_CONFORM_REQ);
+		}
+skip_explicit_conf:
+		ctio->u.status1.flags &=
+		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		ctio->u.status1.flags |=
+		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		ctio->u.status1.scsi_status |=
+		    cpu_to_le16(SS_SENSE_LEN_VALID);
+		ctio->u.status1.sense_length =
+		    cpu_to_le16(prm->sense_buffer_len);
+		for (i = 0; i < prm->sense_buffer_len/4; i++)
+			((uint32_t *)ctio->u.status1.sense_data)[i] =
+				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+
+		qlt_print_dif_err(prm);
+
+	} else {
+		ctio->u.status1.flags &=
+		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		ctio->u.status1.flags |=
+		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		ctio->u.status1.sense_length = 0;
+		memset(ctio->u.status1.sense_data, 0,
+		    sizeof(ctio->u.status1.sense_data));
+	}
+
+	/* Can sense data longer than 24 bytes occur here? */
+}
+
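+/*
+ * Whether the HBA should verify protection information for this operation:
+ * the ql2xenablehba_err_chk module parameter gates TARGET_PROT_DOUT_INSERT
+ * and TARGET_PROT_DIN_STRIP at level 1 and the PASS operations at level 2,
+ * while TARGET_PROT_DIN_INSERT/TARGET_PROT_DOUT_STRIP are always checked.
+ */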
+static inline int
+qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
+{
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+		if (ql2xenablehba_err_chk >= 1)
+			return 1;
+		break;
+	case TARGET_PROT_DOUT_PASS:
+	case TARGET_PROT_DIN_PASS:
+		if (ql2xenablehba_err_chk >= 2)
+			return 1;
+		break;
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+	case TARGET_PROT_DOUT_STRIP:
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+	    return 1;
+	default:
+	    return 0;
+	}
+	return 0;
+}
+
+/*
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
+ */
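+/*
+ * The app tag is left zeroed until TCM grows mode-page support for it. The
+ * ref tag seeds from the low 32 bits of the LBA, with the per-byte ref tag
+ * mask enabled for DIF types 0-2 (unless tag checking is disabled) and
+ * cleared, along with ref tag validation, for type 3.
+ */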
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+    uint16_t *pfw_prot_opts)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+	scsi_qla_host_t *vha = cmd->tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t t32 = 0;
+
+	/*
+	 * Wait until the Mode Sense/Select commands (mode page 0Ah,
+	 * subpage 2) have been implemented by TCM before an AppTag is
+	 * available. See modesense_handlers[].
+	 */
+	ctx->app_tag = 0;
+	ctx->app_tag_mask[0] = 0x0;
+	ctx->app_tag_mask[1] = 0x0;
+
+	if (IS_PI_UNINIT_CAPABLE(ha)) {
+		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+	}
+
+	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
+	switch (se_cmd->prot_type) {
+	case TARGET_DIF_TYPE0_PROT:
+		/*
+		 * No check for ql2xenablehba_err_chk, as it
+		 * would be an I/O error if hba tag generation
+		 * is not done.
+		 */
+		ctx->ref_tag = cpu_to_le32(lba);
+		/* enable ALL bytes of the ref tag */
+		ctx->ref_tag_mask[0] = 0xff;
+		ctx->ref_tag_mask[1] = 0xff;
+		ctx->ref_tag_mask[2] = 0xff;
+		ctx->ref_tag_mask[3] = 0xff;
+		break;
+	case TARGET_DIF_TYPE1_PROT:
+	    /*
+	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+	     * REF tag, and 16 bit app tag.
+	     */
+	    ctx->ref_tag = cpu_to_le32(lba);
+	    if (!qla_tgt_ref_mask_check(se_cmd) ||
+		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+		    break;
+	    }
+	    /* enable ALL bytes of the ref tag */
+	    ctx->ref_tag_mask[0] = 0xff;
+	    ctx->ref_tag_mask[1] = 0xff;
+	    ctx->ref_tag_mask[2] = 0xff;
+	    ctx->ref_tag_mask[3] = 0xff;
+	    break;
+	case TARGET_DIF_TYPE2_PROT:
+	    /*
+	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+	     * tag has to match LBA in CDB + N
+	     */
+	    ctx->ref_tag = cpu_to_le32(lba);
+	    if (!qla_tgt_ref_mask_check(se_cmd) ||
+		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+		    break;
+	    }
+	    /* enable ALL bytes of the ref tag */
+	    ctx->ref_tag_mask[0] = 0xff;
+	    ctx->ref_tag_mask[1] = 0xff;
+	    ctx->ref_tag_mask[2] = 0xff;
+	    ctx->ref_tag_mask[3] = 0xff;
+	    break;
+	case TARGET_DIF_TYPE3_PROT:
+	    /* For TYPE 3 protection: 16 bit GUARD only */
+	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+	    break;
+	}
+}
+
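+/*
+ * Build a CTIO_CRC2 IOCB and its DMA-mapped CRC context for T10-DIF
+ * transfers. Eight bytes of protection data per block are added to the
+ * FC transfer length whenever PI crosses the wire, and bundling (separate
+ * data and DIF DSD lists) is used for all operations except DIN_INSERT /
+ * DOUT_STRIP, where no protection data accompanies the data on the wire.
+ */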
+static inline int
+qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
+{
+	uint32_t		*cur_dsd;
+	uint32_t		transfer_length = 0;
+	uint32_t		data_bytes;
+	uint32_t		dif_bytes;
+	uint8_t			bundling = 1;
+	uint8_t			*clr_ptr;
+	struct crc_context	*crc_ctx_pkt = NULL;
+	struct qla_hw_data	*ha;
+	struct ctio_crc2_to_fw	*pkt;
+	dma_addr_t		crc_ctx_dma;
+	uint16_t		fw_prot_opts = 0;
+	struct qla_tgt_cmd	*cmd = prm->cmd;
+	struct se_cmd		*se_cmd = &cmd->se_cmd;
+	uint32_t h;
+	struct atio_from_isp *atio = &prm->cmd->atio;
+	struct qla_tc_param	tc;
+	uint16_t t16;
+	scsi_qla_host_t *vha = cmd->vha;
+
+	ha = vha->hw;
+
+	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
+	prm->pkt = pkt;
+	memset(pkt, 0, sizeof(*pkt));
+
+	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
+		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
+		cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
+
+	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
+	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
+		bundling = 0;
+
+	/* Compute DIF len and adjust data len to include protection */
+	data_bytes = cmd->bufflen;
+	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;
+
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		transfer_length = data_bytes;
+		if (cmd->prot_sg_cnt)
+			data_bytes += dif_bytes;
+		break;
+	case TARGET_PROT_DIN_STRIP:
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+		transfer_length = data_bytes + dif_bytes;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	if (!qlt_hba_err_chk_enabled(se_cmd))
+		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+	/* HBA error checking enabled */
+	else if (IS_PI_UNINIT_CAPABLE(ha)) {
+		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
+		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+	}
+
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_INSERT:
+		fw_prot_opts |= PO_MODE_DIF_INSERT;
+		break;
+	case TARGET_PROT_DIN_STRIP:
+	case TARGET_PROT_DOUT_STRIP:
+		fw_prot_opts |= PO_MODE_DIF_REMOVE;
+		break;
+	case TARGET_PROT_DIN_PASS:
+	case TARGET_PROT_DOUT_PASS:
+		fw_prot_opts |= PO_MODE_DIF_PASS;
+		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
+		break;
+	default:/* Normal Request */
+		fw_prot_opts |= PO_MODE_DIF_PASS;
+		break;
+	}
+
+	/* ---- PKT ---- */
+	/* Update entry type to indicate Command Type CRC_2 IOCB */
+	pkt->entry_type  = CTIO_CRC2;
+	pkt->entry_count = 1;
+	pkt->vp_index = cmd->vp_idx;
+
+	h = qlt_make_handle(qpair);
+	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+		/*
+		 * CTIO type 7 from the firmware doesn't provide a way to
+		 * know the initiator's LOOP ID, hence we can't find
+		 * the session and, so, the command.
+		 */
+		return -EAGAIN;
+	} else
+		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
+
+	pkt->handle  = MAKE_HANDLE(qpair->req->id, h);
+	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
+	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
+	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
+
+	/* silence compile warning */
+	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	pkt->ox_id  = cpu_to_le16(t16);
+
+	t16 = (atio->u.isp24.attr << 9);
+	pkt->flags |= cpu_to_le16(t16);
+	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
+
+	/* Set transfer direction */
+	if (cmd->dma_data_direction == DMA_TO_DEVICE)
+		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+
+	pkt->dseg_count = prm->tot_dsds;
+	/* Fibre channel byte count */
+	pkt->transfer_length = cpu_to_le32(transfer_length);
+
+	/* ----- CRC context -------- */
+
+	/* Allocate CRC context from global pool */
+	crc_ctx_pkt = cmd->ctx =
+	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+
+	if (!crc_ctx_pkt)
+		goto crc_queuing_error;
+
+	/* Zero out CTX area. */
+	clr_ptr = (uint8_t *)crc_ctx_pkt;
+	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+	/* Set handle */
+	crc_ctx_pkt->handle = pkt->handle;
+
+	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
+
+	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+	if (!bundling) {
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+	} else {
+		/*
+		 * Configure bundling if we need to fetch interleaving
+		 * protection PCI accesses
+		 */
+		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+		crc_ctx_pkt->u.bundling.dseg_count =
+			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+	}
+
+	/* Finish the common fields of CRC pkt */
+	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
+	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
+	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
+
+	memset((uint8_t *)&tc, 0, sizeof(tc));
+	tc.vha = vha;
+	tc.blk_sz = cmd->blk_sz;
+	tc.bufflen = cmd->bufflen;
+	tc.sg = cmd->sg;
+	tc.prot_sg = cmd->prot_sg;
+	tc.ctx = crc_ctx_pkt;
+	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
+
+	/* Walks data segments */
+	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+
+	if (!bundling && prm->prot_seg_cnt) {
+		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
+			prm->tot_dsds, &tc))
+			goto crc_queuing_error;
+	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
+		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
+		goto crc_queuing_error;
+
+	if (bundling && prm->prot_seg_cnt) {
+		/* Walks dif segments */
+		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
+
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
+			prm->prot_seg_cnt, &tc))
+			goto crc_queuing_error;
+	}
+	return QLA_SUCCESS;
+
+crc_queuing_error:
+	/* Cleanup will be performed by the caller */
+	qpair->req->outstanding_cmds[h] = NULL;
+
+	return QLA_FUNCTION_FAILED;
+}
+
+/*
+ * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+	uint8_t scsi_status)
+{
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_qpair *qpair = cmd->qpair;
+	struct ctio7_to_24xx *pkt;
+	struct qla_tgt_prm prm;
+	uint32_t full_req_cnt = 0;
+	unsigned long flags = 0;
+	int res;
+
+	if (cmd->sess && cmd->sess->deleted) {
+		cmd->state = QLA_TGT_STATE_PROCESSED;
+		if (cmd->sess->logout_completed)
+			/* no need to terminate. FW already freed exchange. */
+			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		else
+			qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
+		return 0;
+	}
+
+	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
+	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
+	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
+	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
+	    &cmd->se_cmd, qpair->id);
+
+	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+	    &full_req_cnt);
+	if (unlikely(res != 0))
+		return res;
+
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+
+	if (xmit_type == QLA_TGT_XMIT_STATUS)
+		qpair->tgt_counters.core_qla_snd_status++;
+	else
+		qpair->tgt_counters.core_qla_que_buf++;
+
+	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
+		/*
+		 * Either the port is not online or this request was from
+		 * previous life, just abort the processing.
+		 */
+		cmd->state = QLA_TGT_STATE_PROCESSED;
+		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
+			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
+			vha->flags.online, qla2x00_reset_active(vha),
+			cmd->reset_count, qpair->chip_reset);
+		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+		return 0;
+	}
+
+	/* Does F/W have IOCBs for this request? */
+	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
+	if (unlikely(res))
+		goto out_unmap_unlock;
+
+	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
+		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
+	else
+		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
+	if (unlikely(res != 0)) {
+		qpair->req->cnt += full_req_cnt;
+		goto out_unmap_unlock;
+	}
+
+	pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+		pkt->u.status0.flags |=
+		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+			CTIO7_FLAGS_STATUS_MODE_0);
+
+		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+			qlt_load_data_segments(&prm);
+
+		if (prm.add_status_pkt == 0) {
+			if (xmit_type & QLA_TGT_XMIT_STATUS) {
+				pkt->u.status0.scsi_status =
+				    cpu_to_le16(prm.rq_result);
+				pkt->u.status0.residual =
+				    cpu_to_le32(prm.residual);
+				pkt->u.status0.flags |= cpu_to_le16(
+				    CTIO7_FLAGS_SEND_STATUS);
+				if (qlt_need_explicit_conf(cmd, 0)) {
+					pkt->u.status0.flags |=
+					    cpu_to_le16(
+						CTIO7_FLAGS_EXPLICIT_CONFORM |
+						CTIO7_FLAGS_CONFORM_REQ);
+				}
+			}
+
+		} else {
+			/*
+			 * We have already made sure that there is sufficient
+			 * amount of request entries to not drop HW lock in
+			 * req_pkt().
+			 */
+			struct ctio7_to_24xx *ctio =
+				(struct ctio7_to_24xx *)qlt_get_req_pkt(
+				    qpair->req);
+
+			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
+			    "Building additional status packet 0x%p.\n",
+			    ctio);
+
+			/*
+			 * T10-DIF: ctio_crc2_to_fw overlays on top of
+			 * ctio7_to_24xx
+			 */
+			memcpy(ctio, pkt, sizeof(*ctio));
+			/* reset back to CTIO7 */
+			ctio->entry_count = 1;
+			ctio->entry_type = CTIO_TYPE7;
+			ctio->dseg_count = 0;
+			ctio->u.status1.flags &= ~cpu_to_le16(
+			    CTIO7_FLAGS_DATA_IN);
+
+			/* Real finish is ctio_m1's finish */
+			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+			pkt->u.status0.flags |= cpu_to_le16(
+			    CTIO7_FLAGS_DONT_RET_CTIO);
+
+			/* qlt_24xx_init_ctio_to_isp will correct
+			 * all necessary fields that are part of CTIO7;
+			 * there should be no residue of CTIO-CRC2 data.
+			 */
+			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+			    &prm);
+		}
+	} else
+		qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+	cmd->cmd_sent_to_fw = 1;
+
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	return 0;
+
+out_unmap_unlock:
+	qlt_unmap_sg(vha, cmd);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+	struct ctio7_to_24xx *pkt;
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_tgt *tgt = cmd->tgt;
+	struct qla_tgt_prm prm;
+	unsigned long flags = 0;
+	int res = 0;
+	struct qla_qpair *qpair = cmd->qpair;
+
+	memset(&prm, 0, sizeof(prm));
+	prm.cmd = cmd;
+	prm.tgt = tgt;
+	prm.sg = NULL;
+	prm.req_cnt = 1;
+
+	/* Calculate number of entries and segments required */
+	if (qlt_pci_map_calc_cnt(&prm) != 0)
+		return -EAGAIN;
+
+	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
+	    (cmd->sess && cmd->sess->deleted)) {
+		/*
+		 * Either the port is not online or this request was from
+		 * previous life, just abort the processing.
+		 */
+		cmd->state = QLA_TGT_STATE_NEED_DATA;
+		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
+			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
+			vha->flags.online, qla2x00_reset_active(vha),
+			cmd->reset_count, qpair->chip_reset);
+		return 0;
+	}
+
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	/* Does F/W have IOCBs for this request? */
+	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
+	if (res != 0)
+		goto out_unlock_free_unmap;
+	if (cmd->se_cmd.prot_op)
+		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
+	else
+		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
+
+	if (unlikely(res != 0)) {
+		qpair->req->cnt += prm.req_cnt;
+		goto out_unlock_free_unmap;
+	}
+
+	pkt = (struct ctio7_to_24xx *)prm.pkt;
+	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+	    CTIO7_FLAGS_STATUS_MODE_0);
+
+	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+		qlt_load_data_segments(&prm);
+
+	cmd->state = QLA_TGT_STATE_NEED_DATA;
+	cmd->cmd_sent_to_fw = 1;
+
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	return res;
+
+out_unlock_free_unmap:
+	qlt_unmap_sg(vha, cmd);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/*
+ * It is assumed that either the hardware_lock or the qpair lock is held.
+ */
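+/*
+ * Unpack the actual vs. expected guard/app/ref tags reported by the
+ * firmware, classify the miscompare, then either push the error back
+ * through tgt_ops->handle_data() (data-out path) or answer with CHECK
+ * CONDITION, sense ABORTED COMMAND, ASC 0x10 and ASCQ 1/2/3 for
+ * guard/app/ref errors respectively.
+ */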
+static void
+qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
+	struct ctio_crc_from_fw *sts)
+{
+	uint8_t		*ap = &sts->actual_dif[0];
+	uint8_t		*ep = &sts->expected_dif[0];
+	uint64_t	lba = cmd->se_cmd.t_task_lba;
+	uint8_t scsi_status, sense_key, asc, ascq;
+	unsigned long flags;
+	struct scsi_qla_host *vha = cmd->vha;
+
+	cmd->trc_flags |= TRC_DIF_ERR;
+
+	cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
+	cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+	cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+
+	cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
+	cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+	cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+
+	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
+
+	scsi_status = sense_key = asc = ascq = 0;
+
+	/* check appl tag */
+	if (cmd->e_app_tag != cmd->a_app_tag) {
+		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
+		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
+		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+		    cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+		cmd->dif_err_code = DIF_ERR_APP;
+		scsi_status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ABORTED_COMMAND;
+		asc = 0x10;
+		ascq = 0x2;
+	}
+
+	/* check ref tag */
+	if (cmd->e_ref_tag != cmd->a_ref_tag) {
+		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
+		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
+		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+		    cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+		cmd->dif_err_code = DIF_ERR_REF;
+		scsi_status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ABORTED_COMMAND;
+		asc = 0x10;
+		ascq = 0x3;
+		goto out;
+	}
+
+	/* check guard */
+	if (cmd->e_guard != cmd->a_guard) {
+		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
+		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
+		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+		    cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+		cmd->dif_err_code = DIF_ERR_GRD;
+		scsi_status = SAM_STAT_CHECK_CONDITION;
+		sense_key = ABORTED_COMMAND;
+		asc = 0x10;
+		ascq = 0x1;
+	}
+out:
+	switch (cmd->state) {
+	case QLA_TGT_STATE_NEED_DATA:
+		/* handle_data will load DIF error code  */
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+		vha->hw->tgt.tgt_ops->handle_data(cmd);
+		break;
+	default:
+		spin_lock_irqsave(&cmd->cmd_lock, flags);
+		if (cmd->aborted) {
+			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+			vha->hw->tgt.tgt_ops->free_cmd(cmd);
+			break;
+		}
+		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
+		    ascq);
+		/*
+		 * Assume the SCSI status gets out on the wire;
+		 * do not wait for completion.
+		 */
+		vha->hw->tgt.tgt_ops->free_cmd(cmd);
+		break;
+	}
+}
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *ntfy)
+{
+	struct nack_to_isp *nack;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	int ret = 0;
+
+	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+	    "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+	if (pkt == NULL) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe080,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+
+	pkt->entry_type = NOTIFY_ACK_TYPE;
+	pkt->entry_count = 1;
+	pkt->handle = QLA_TGT_SKIP_HANDLE;
+
+	nack = (struct nack_to_isp *)pkt;
+	nack->ox_id = ntfy->ox_id;
+
+	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+		nack->u.isp24.flags = ntfy->u.isp24.flags &
+			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+	}
+
+	/* terminate */
+	nack->u.isp24.flags |=
+		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+	nack->u.isp24.status = ntfy->u.isp24.status;
+	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+	qla2x00_start_iocbs(vha, vha->req);
+	return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+	unsigned long flags = 0;
+	int rc;
+
+	if (ha_locked) {
+		rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0	/* TODO */
+		if (rc == -ENOMEM)
+			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+		if (rc) {
+		}
+#endif
+		goto done;
+	}
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0	/* TODO */
+	if (rc == -ENOMEM)
+		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+	if (!ha_locked)
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+/*
+ * If hardware_lock held on entry, might drop it, then reacquire.
+ * This function sends the appropriate CTIO to ISP 2xxx or 24xx
+ */
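+/*
+ * Build a CTIO7 with CTIO7_FLAGS_TERMINATE to tear down the exchange
+ * described by the ATIO. Returns -ENOMEM if no IOCB could be allocated,
+ * 1 if the terminated command had already reached QLA_TGT_STATE_PROCESSED
+ * and 0 otherwise.
+ */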
+static int __qlt_send_term_exchange(struct qla_qpair *qpair,
+	struct qla_tgt_cmd *cmd,
+	struct atio_from_isp *atio)
+{
+	struct scsi_qla_host *vha = qpair->vha;
+	struct ctio7_to_24xx *ctio24;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	int ret = 0;
+	uint16_t temp;
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+	if (cmd)
+		vha = cmd->vha;
+
+	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
+	if (pkt == NULL) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe050,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+
+	if (cmd != NULL) {
+		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe051,
+			    "qla_target(%d): Terminating cmd %p with "
+			    "incorrect state %d\n", vha->vp_idx, cmd,
+			    cmd->state);
+		} else
+			ret = 1;
+	}
+
+	qpair->tgt_counters.num_term_xchg_sent++;
+	pkt->entry_count = 1;
+	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+	ctio24 = (struct ctio7_to_24xx *)pkt;
+	ctio24->entry_type = CTIO_TYPE7;
+	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
+	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->vp_index = vha->vp_idx;
+	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
+		CTIO7_FLAGS_TERMINATE;
+	ctio24->u.status1.flags = cpu_to_le16(temp);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	ctio24->u.status1.ox_id = cpu_to_le16(temp);
+
+	/* Most likely, it isn't needed */
+	ctio24->u.status1.residual = get_unaligned((uint32_t *)
+	    &atio->u.isp24.fcp_cmnd.add_cdb[
+	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
+	if (ctio24->u.status1.residual != 0)
+		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+	return ret;
+}
+
+static void qlt_send_term_exchange(struct qla_qpair *qpair,
+	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
+	int ul_abort)
+{
+	struct scsi_qla_host *vha;
+	unsigned long flags = 0;
+	int rc;
+
+	/* why use different vha? NPIV */
+	if (cmd)
+		vha = cmd->vha;
+	else
+		vha = qpair->vha;
+
+	if (ha_locked) {
+		rc = __qlt_send_term_exchange(qpair, cmd, atio);
+		if (rc == -ENOMEM)
+			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
+		goto done;
+	}
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	rc = __qlt_send_term_exchange(qpair, cmd, atio);
+	if (rc == -ENOMEM)
+		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
+
+done:
+	if (cmd && !ul_abort && !cmd->aborted) {
+		if (cmd->sg_mapped)
+			qlt_unmap_sg(vha, cmd);
+		vha->hw->tgt.tgt_ops->free_cmd(cmd);
+	}
+
+	if (!ha_locked)
+		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	return;
+}
+
+static void qlt_init_term_exchange(struct scsi_qla_host *vha)
+{
+	struct list_head free_list;
+	struct qla_tgt_cmd *cmd, *tcmd;
+
+	vha->hw->tgt.leak_exchg_thresh_hold =
+	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+
+	cmd = tcmd = NULL;
+	if (!list_empty(&vha->hw->tgt.q_full_list)) {
+		INIT_LIST_HEAD(&free_list);
+		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
+
+		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+			list_del(&cmd->cmd_list);
+			/* This cmd was never sent to TCM.  There is no need
+			 * to schedule free or call free_cmd
+			 */
+			qlt_free_cmd(cmd);
+			vha->hw->tgt.num_qfull_cmds_alloc--;
+		}
+	}
+	vha->hw->tgt.num_qfull_cmds_dropped = 0;
+}
+
+static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+{
+	uint32_t total_leaked;
+
+	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
+
+	if (vha->hw->tgt.leak_exchg_thresh_hold &&
+	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
+
+		ql_dbg(ql_dbg_tgt, vha, 0xe079,
+		    "Chip reset due to exchange starvation: %d/%d.\n",
+		    total_leaked, vha->hw->cur_fw_xcb_count);
+
+		if (IS_P3P_TYPE(vha->hw))
+			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+		else
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+	}
+}
+
+int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+	struct qla_tgt *tgt = cmd->tgt;
+	struct scsi_qla_host *vha = tgt->vha;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	unsigned long flags;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+	    "qla_target(%d): terminating exchange for aborted cmd=%p "
+	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+	    se_cmd->tag);
+
+	spin_lock_irqsave(&cmd->cmd_lock, flags);
+	if (cmd->aborted) {
+		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+		/*
+		 * It's normal to see 2 calls in this path:
+		 *  1) XFER Rdy completion + CMD_T_ABORT
+		 *  2) TCM TMR - drain_state_list
+		 */
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
+		    "multiple abort. %p transport_state %x, t_state %x, "
+		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
+		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
+		return -EIO;
+	}
+	cmd->aborted = 1;
+	cmd->trc_flags |= TRC_ABORT;
+	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
+	return 0;
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+	struct fc_port *sess = cmd->sess;
+
+	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
+	    "%s: se_cmd[%p] ox_id %04x\n",
+	    __func__, &cmd->se_cmd,
+	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+
+	BUG_ON(cmd->cmd_in_wq);
+
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(cmd->vha, cmd);
+
+	if (!cmd->q_full)
+		qlt_decr_num_pend_cmds(cmd->vha);
+
+	BUG_ON(cmd->sg_mapped);
+	cmd->jiffies_at_free = get_jiffies_64();
+	if (unlikely(cmd->free_sg))
+		kfree(cmd->sg);
+
+	if (!sess || !sess->se_sess) {
+		WARN_ON(1);
+		return;
+	}
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
+	struct qla_tgt_cmd *cmd, uint32_t status)
+{
+	int term = 0;
+	struct scsi_qla_host *vha = qpair->vha;
+
+	if (cmd->se_cmd.prot_op)
+		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
+		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+		    "se_cmd=%p tag[%x] op %#x/%s",
+		     cmd->lba, cmd->lba,
+		     cmd->num_blks, &cmd->se_cmd,
+		     cmd->atio.u.isp24.exchange_addr,
+		     cmd->se_cmd.prot_op,
+		     prot_op_str(cmd->se_cmd.prot_op));
+
+	if (ctio != NULL) {
+		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+		term = !(c->flags &
+		    cpu_to_le16(OF_TERM_EXCH));
+	} else
+		term = 1;
+
+	if (term)
+		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
+
+	return term;
+}
+
+
+/* ha->hardware_lock supposed to be held on entry */
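+/*
+ * A CTIO completion handle encodes both a request queue ID (GET_QID) and an
+ * index into that queue's outstanding_cmds[] array.  Decode it, look up the
+ * owning command and clear its outstanding slot; QLA_TGT_SKIP_HANDLE marks
+ * CTIOs that never had a command attached.
+ */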
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, uint32_t handle, void *ctio)
+{
+	struct qla_tgt_cmd *cmd = NULL;
+	struct req_que *req;
+	int qid = GET_QID(handle);
+	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
+
+	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
+		return NULL;
+
+	if (qid == rsp->req->id) {
+		req = rsp->req;
+	} else if (vha->hw->req_q_map[qid]) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
+		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
+		    vha->vp_idx, rsp->id, handle);
+		req = vha->hw->req_q_map[qid];
+	} else {
+		return NULL;
+	}
+
+	h &= QLA_CMD_HANDLE_MASK;
+
+	if (h != QLA_TGT_NULL_HANDLE) {
+		if (unlikely(h >= req->num_outstanding_cmds)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe052,
+			    "qla_target(%d): Wrong handle %x received\n",
+			    vha->vp_idx, handle);
+			return NULL;
+		}
+
+		cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
+		if (unlikely(cmd == NULL)) {
+			ql_dbg(ql_dbg_async, vha, 0xe053,
+			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
+				vha->vp_idx, handle, req->id, rsp->id);
+			return NULL;
+		}
+		req->outstanding_cmds[h] = NULL;
+	} else if (ctio != NULL) {
+		/* We can't get loop ID from CTIO7 */
+		ql_dbg(ql_dbg_tgt, vha, 0xe054,
+		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+		    "support NULL handles\n", vha->vp_idx);
+		return NULL;
+	}
+
+	return cmd;
+}
+
+/* hardware_lock should be held by caller. */
+void
+qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(vha, cmd);
+
+	/* TODO: fix debug message type and ids. */
+	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+		ql_dbg(ql_dbg_io, vha, 0xff00,
+		    "HOST-ABORT: state=PROCESSED.\n");
+	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+		cmd->write_data_transferred = 0;
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+
+		ql_dbg(ql_dbg_io, vha, 0xff01,
+		    "HOST-ABORT: state=DATA_IN.\n");
+
+		ha->tgt.tgt_ops->handle_data(cmd);
+		return;
+	} else {
+		ql_dbg(ql_dbg_io, vha, 0xff03,
+		    "HOST-ABORT: state=BAD(%d).\n",
+		    cmd->state);
+		dump_stack();
+	}
+
+	cmd->trc_flags |= TRC_FLUSH;
+	ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
+    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct se_cmd *se_cmd;
+	struct qla_tgt_cmd *cmd;
+	struct qla_qpair *qpair = rsp->qpair;
+
+	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+		/* That could happen only in case of an error/reset/abort */
+		if (status != CTIO_SUCCESS) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+			    "Intermediate CTIO received"
+			    " (status %x)\n", status);
+		}
+		return;
+	}
+
+	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+	if (cmd == NULL)
+		return;
+
+	se_cmd = &cmd->se_cmd;
+	cmd->cmd_sent_to_fw = 0;
+
+	qlt_unmap_sg(vha, cmd);
+
+	if (unlikely(status != CTIO_SUCCESS)) {
+		switch (status & 0xFFFF) {
+		case CTIO_LIP_RESET:
+		case CTIO_TARGET_RESET:
+		case CTIO_ABORTED:
+			/* driver requested abort via Terminate exchange */
+		case CTIO_TIMEOUT:
+		case CTIO_INVALID_RX_ID:
+			/* They are OK */
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+			    "qla_target(%d): CTIO with "
+			    "status %#x received, state %x, se_cmd %p, "
+			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+			    status, cmd->state, se_cmd);
+			break;
+
+		case CTIO_PORT_LOGGED_OUT:
+		case CTIO_PORT_UNAVAILABLE:
+		{
+			int logged_out =
+				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
+
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+			    "qla_target(%d): CTIO with %s status %x "
+			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
+			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
+			    status, cmd->state, se_cmd);
+
+			if (logged_out && cmd->sess) {
+				/*
+				 * The session is already logged out, but we
+				 * need to notify the initiator, which is not
+				 * aware of this
+				 */
+				cmd->sess->logout_on_delete = 0;
+				cmd->sess->send_els_logo = 1;
+				ql_dbg(ql_dbg_disc, vha, 0x20f8,
+				    "%s %d %8phC post del sess\n",
+				    __func__, __LINE__, cmd->sess->port_name);
+
+				qlt_schedule_sess_for_deletion_lock(cmd->sess);
+			}
+			break;
+		}
+		case CTIO_DIF_ERROR: {
+			struct ctio_crc_from_fw *crc =
+				(struct ctio_crc_from_fw *)ctio;
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
+			    "qla_target(%d): CTIO with DIF_ERROR status %x "
+			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+			    "expect_dif[0x%llx]\n",
+			    vha->vp_idx, status, cmd->state, se_cmd,
+			    *((u64 *)&crc->actual_dif[0]),
+			    *((u64 *)&crc->expected_dif[0]));
+
+			qlt_handle_dif_error(qpair, cmd, ctio);
+			return;
+		}
+		default:
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
+			    vha->vp_idx, status, cmd->state, se_cmd);
+			break;
+		}
+
+
+		/* "cmd->aborted" means
+		 * cmd is already aborted/terminated, we don't
+		 * need to terminate again.  The exchange is already
+		 * cleaned up/freed at FW level.  Just cleanup at driver
+		 * level.
+		 */
+		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+		    (!cmd->aborted)) {
+			cmd->trc_flags |= TRC_CTIO_ERR;
+			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
+				return;
+		}
+	}
+
+	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+		cmd->trc_flags |= TRC_CTIO_DONE;
+	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+
+		if (status == CTIO_SUCCESS)
+			cmd->write_data_transferred = 1;
+
+		ha->tgt.tgt_ops->handle_data(cmd);
+		return;
+	} else if (cmd->aborted) {
+		cmd->trc_flags |= TRC_CTIO_ABORTED;
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
+	} else {
+		cmd->trc_flags |= TRC_CTIO_STRANGE;
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+		    "qla_target(%d): A command in state (%d) should "
+		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+	}
+
+	if (unlikely(status != CTIO_SUCCESS) &&
+		!cmd->aborted) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+		dump_stack();
+	}
+
+	ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
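+/* Map the FCP task attribute from the ATIO to the matching TCM tag type;
+ * unknown codes are logged and treated as ORDERED. */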
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+	uint8_t task_codes)
+{
+	int fcp_task_attr;
+
+	switch (task_codes) {
+	case ATIO_SIMPLE_QUEUE:
+		fcp_task_attr = TCM_SIMPLE_TAG;
+		break;
+	case ATIO_HEAD_OF_QUEUE:
+		fcp_task_attr = TCM_HEAD_TAG;
+		break;
+	case ATIO_ORDERED_QUEUE:
+		fcp_task_attr = TCM_ORDERED_TAG;
+		break;
+	case ATIO_ACA_QUEUE:
+		fcp_task_attr = TCM_ACA_TAG;
+		break;
+	case ATIO_UNTAGGED:
+		fcp_task_attr = TCM_SIMPLE_TAG;
+		break;
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+		    "qla_target: unknown task code %x, use ORDERED instead\n",
+		    task_codes);
+		fcp_task_attr = TCM_ORDERED_TAG;
+		break;
+	}
+
+	return fcp_task_attr;
+}
+
+static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
+					uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void __qlt_do_work(struct qla_tgt_cmd *cmd)
+{
+	scsi_qla_host_t *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess = cmd->sess;
+	struct atio_from_isp *atio = &cmd->atio;
+	unsigned char *cdb;
+	unsigned long flags;
+	uint32_t data_length;
+	int ret, fcp_task_attr, data_dir, bidi = 0;
+	struct qla_qpair *qpair = cmd->qpair;
+
+	cmd->cmd_in_wq = 0;
+	cmd->trc_flags |= TRC_DO_WORK;
+
+	if (cmd->aborted) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+		    "cmd with tag %u is aborted\n",
+		    cmd->atio.u.isp24.exchange_addr);
+		goto out_term;
+	}
+
+	spin_lock_init(&cmd->cmd_lock);
+	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
+
+	if (atio->u.isp24.fcp_cmnd.rddata &&
+	    atio->u.isp24.fcp_cmnd.wrdata) {
+		bidi = 1;
+		data_dir = DMA_TO_DEVICE;
+	} else if (atio->u.isp24.fcp_cmnd.rddata)
+		data_dir = DMA_FROM_DEVICE;
+	else if (atio->u.isp24.fcp_cmnd.wrdata)
+		data_dir = DMA_TO_DEVICE;
+	else
+		data_dir = DMA_NONE;
+
+	fcp_task_attr = qlt_get_fcp_task_attr(vha,
+	    atio->u.isp24.fcp_cmnd.task_attr);
+	data_length = be32_to_cpu(get_unaligned((uint32_t *)
+	    &atio->u.isp24.fcp_cmnd.add_cdb[
+	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+				          fcp_task_attr, data_dir, bidi);
+	if (ret != 0)
+		goto out_term;
+	/*
+	 * Drop the extra session reference taken by the caller
+	 * (qlt_handle_cmd_for_atio() or qlt_make_local_sess()).
+	 */
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	return;
+
+out_term:
+	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
+	/*
+	 * The cmd has not been sent to the target core yet, so pass NULL as
+	 * the second argument to qlt_send_term_exchange() and free the
+	 * memory here.
+	 */
+	cmd->trc_flags |= TRC_DO_WORK_ERR;
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
+
+	qlt_decr_num_pend_cmds(vha);
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
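+/*
+ * Workqueue entry point: unlink the command from the per-vha list first, so
+ * abort_cmds_for_s_id() can no longer mark it, then process it.
+ */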
+static void qlt_do_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	scsi_qla_host_t *vha = cmd->vha;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_del(&cmd->cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	__qlt_do_work(cmd);
+}
+
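+/*
+ * Called when the user changes the number of active qpairs: flush the
+ * lun_qpair_map btree and reset the per-qpair lun counts so that
+ * qlt_assign_qpair() rebuilds the mapping on the next command per lun.
+ */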
+void qlt_clr_qp_table(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	void *node;
+	u64 key = 0;
+
+	ql_log(ql_log_info, vha, 0x706c,
+	    "User update Number of Active Qpairs %d\n",
+	    ha->tgt.num_act_qpairs);
+
+	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+
+	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
+		btree_remove64(&tgt->lun_qpair_map, key);
+
+	ha->base_qpair->lun_cnt = 0;
+	for (key = 0; key < ha->max_qpairs; key++)
+		if (ha->queue_pair_map[key])
+			ha->queue_pair_map[key]->lun_cnt = 0;
+
+	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+}
+
+static void qlt_assign_qpair(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd)
+{
+	struct qla_qpair *qpair, *qp;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_qpair_hint *h;
+
+	if (vha->flags.qpairs_available) {
+		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
+		if (unlikely(!h)) {
+			/* spread the lun-to-qpair mapping evenly */
+			int lcnt = 0, rc;
+			struct scsi_qla_host *base_vha =
+				pci_get_drvdata(vha->hw->pdev);
+
+			qpair = vha->hw->base_qpair;
+			if (qpair->lun_cnt == 0) {
+				qpair->lun_cnt++;
+				h = qla_qpair_to_hint(tgt, qpair);
+				BUG_ON(!h);
+				rc = btree_insert64(&tgt->lun_qpair_map,
+					cmd->unpacked_lun, h, GFP_ATOMIC);
+				if (rc) {
+					qpair->lun_cnt--;
+					ql_log(ql_log_info, vha, 0xd037,
+					    "Unable to insert lun %llx into lun_qpair_map\n",
+					    cmd->unpacked_lun);
+				}
+				goto out;
+			} else {
+				lcnt = qpair->lun_cnt;
+			}
+
+			h = NULL;
+			list_for_each_entry(qp, &base_vha->qp_list,
+			    qp_list_elem) {
+				if (qp->lun_cnt == 0) {
+					qp->lun_cnt++;
+					h = qla_qpair_to_hint(tgt, qp);
+					BUG_ON(!h);
+					rc = btree_insert64(&tgt->lun_qpair_map,
+					    cmd->unpacked_lun, h, GFP_ATOMIC);
+					if (rc) {
+						qp->lun_cnt--;
+						ql_log(ql_log_info, vha, 0xd038,
+							"Unable to insert lun %llx into lun_qpair_map\n",
+							cmd->unpacked_lun);
+					}
+					qpair = qp;
+					goto out;
+				} else {
+					if (qp->lun_cnt < lcnt) {
+						lcnt = qp->lun_cnt;
+						qpair = qp;
+						continue;
+					}
+				}
+			}
+			BUG_ON(!qpair);
+			qpair->lun_cnt++;
+			h = qla_qpair_to_hint(tgt, qpair);
+			BUG_ON(!h);
+			rc = btree_insert64(&tgt->lun_qpair_map,
+				cmd->unpacked_lun, h, GFP_ATOMIC);
+			if (rc) {
+				qpair->lun_cnt--;
+				ql_log(ql_log_info, vha, 0xd039,
+				   "Unable to insert lun %llx into lun_qpair_map\n",
+				   cmd->unpacked_lun);
+			}
+		}
+	} else {
+		h = &tgt->qphints[0];
+	}
+out:
+	cmd->qpair = h->qpair;
+	cmd->se_cmd.cpuid = h->cpuid;
+}
+
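+/*
+ * Allocate a command slot for this session.  Tags come from the se_session's
+ * percpu_ida pool, and the tag doubles as an index into the preallocated
+ * sess_cmd_map array, so no memory allocation happens on this path.
+ */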
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+				       struct fc_port *sess,
+				       struct atio_from_isp *atio)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return NULL;
+
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+	cmd->cmd_type = TYPE_TGT_CMD;
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+	cmd->state = QLA_TGT_STATE_NEW;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	qlt_incr_num_pend_cmds(vha);
+	cmd->vha = vha;
+	cmd->se_cmd.map_tag = tag;
+	cmd->sess = sess;
+	cmd->loop_id = sess->loop_id;
+	cmd->conf_compl_supported = sess->conf_compl_supported;
+
+	cmd->trc_flags = 0;
+	cmd->jiffies_at_alloc = get_jiffies_64();
+
+	cmd->unpacked_lun = scsilun_to_int(
+	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+	qlt_assign_qpair(vha, cmd);
+	cmd->reset_count = vha->hw->base_qpair->chip_reset;
+	cmd->vp_idx = vha->vp_idx;
+
+	return cmd;
+}
+
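+/*
+ * Deferred-work path for an ATIO that arrived with no matching session:
+ * create the session (qlt_make_local_sess() returns it with an extra
+ * creation reference), then allocate a tag and run the command in this
+ * process context via __qlt_do_work().
+ */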
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+	struct qla_tgt_sess_op *op = container_of(work,
+					struct qla_tgt_sess_op, work);
+	scsi_qla_host_t *vha = op->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_del(&op->cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	if (op->aborted) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+		    "sess_op with tag %u is aborted\n",
+		    op->atio.u.isp24.exchange_addr);
+		goto out_term;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+	    "qla_target(%d): Unable to find wwn login"
+	    " (s_id %x:%x:%x), trying to create it manually\n",
+	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+	if (op->atio.u.raw.entry_count > 1) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+		    "Dropping multy entry atio %p\n", &op->atio);
+		goto out_term;
+	}
+
+	sess = qlt_make_local_sess(vha, s_id);
+	/* sess has an extra creation ref. */
+
+	if (!sess)
+		goto out_term;
+	/*
+	 * Now obtain a pre-allocated session tag using the original op->atio
+	 * packet header, and dispatch into __qlt_do_work() using the existing
+	 * process context.
+	 */
+	cmd = qlt_get_tag(vha, sess, &op->atio);
+	if (!cmd) {
+		struct qla_qpair *qpair = ha->base_qpair;
+
+		spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+		qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
+		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		ha->tgt.tgt_ops->put_sess(sess);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+		kfree(op);
+		return;
+	}
+
+	/*
+	 * __qlt_do_work() will call qlt_put_sess() to release
+	 * the extra reference taken above by qlt_make_local_sess()
+	 */
+	__qlt_do_work(cmd);
+	kfree(op);
+	return;
+out_term:
+	qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
+	kfree(op);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct fc_port *sess;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+
+	if (unlikely(tgt->tgt_stop)) {
+		ql_dbg(ql_dbg_io, vha, 0x3061,
+		    "New command while device %p is shutting down\n", tgt);
+		return -EFAULT;
+	}
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+	if (unlikely(!sess)) {
+		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+						     GFP_ATOMIC);
+		if (!op)
+			return -ENOMEM;
+
+		memcpy(&op->atio, atio, sizeof(*atio));
+		op->vha = vha;
+
+		spin_lock_irqsave(&vha->cmd_list_lock, flags);
+		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+		INIT_WORK(&op->work, qlt_create_sess_from_atio);
+		queue_work(qla_tgt_wq, &op->work);
+		return 0;
+	}
+
+	/* Another WWN used to have our s_id. Our PLOGI scheduled its
+	 * session deletion, but it is still pending in the sess_del_work wq */
+	if (sess->deleted) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+		    "New command while old session %p is being deleted\n",
+		    sess);
+		return -EFAULT;
+	}
+
+	/*
+	 * Do kref_get() before returning and dropping qla_hw_data->hardware_lock.
+	 */
+	if (!kref_get_unless_zero(&sess->sess_kref)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+		    "%s: kref_get fail, %8phC oxid %x \n",
+		    __func__, sess->port_name,
+		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+		return -EFAULT;
+	}
+
+	cmd = qlt_get_tag(vha, sess, atio);
+	if (!cmd) {
+		ql_dbg(ql_dbg_io, vha, 0x3062,
+		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		ha->tgt.tgt_ops->put_sess(sess);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+		return -ENOMEM;
+	}
+
+	cmd->cmd_in_wq = 1;
+	cmd->trc_flags |= TRC_NEW_CMD;
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	INIT_WORK(&cmd->work, qlt_do_work);
+	if (vha->flags.qpairs_available) {
+		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
+	} else if (ha->msix_count) {
+		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
+			queue_work_on(smp_processor_id(), qla_tgt_wq,
+			    &cmd->work);
+		else
+			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
+			    &cmd->work);
+	} else {
+		queue_work(qla_tgt_wq, &cmd->work);
+	}
+
+	return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
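+/*
+ * Allocate a qla_tgt_mgmt_cmd from the mempool and pass it to the fabric's
+ * handle_tmr() callback; for a LUN RESET, commands queued for that lun are
+ * aborted first via abort_cmds_for_lun().
+ */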
+static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
+	int fn, void *iocb, int flags)
+{
+	struct scsi_qla_host *vha = sess->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+	int res;
+
+	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+	if (!mcmd) {
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+		    "qla_target(%d): Allocation of management "
+		    "command failed, some commands and their data could "
+		    "leak\n", vha->vp_idx);
+		return -ENOMEM;
+	}
+	memset(mcmd, 0, sizeof(*mcmd));
+	mcmd->sess = sess;
+
+	if (iocb) {
+		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+		    sizeof(mcmd->orig_iocb.imm_ntfy));
+	}
+	mcmd->tmr_func = fn;
+	mcmd->flags = flags;
+	mcmd->reset_count = ha->base_qpair->chip_reset;
+	mcmd->qpair = ha->base_qpair;
+	mcmd->vha = vha;
+
+	switch (fn) {
+	case QLA_TGT_LUN_RESET:
+	    abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+	    break;
+	}
+
+	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
+	if (res != 0) {
+		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+		    sess->vha->vp_idx, res);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt;
+	struct fc_port *sess;
+	u64 unpacked_lun;
+	int fn;
+	unsigned long flags;
+
+	tgt = vha->vha_tgt.qla_tgt;
+
+	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+	    a->u.isp24.fcp_hdr.s_id);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	unpacked_lun =
+	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
+
+	if (!sess) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+		    "qla_target(%d): task mgmt fn 0x%x for "
+		    "non-existant session\n", vha->vp_idx, fn);
+		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+		    sizeof(struct atio_from_isp));
+	}
+
+	if (sess->deleted)
+		return -EFAULT;
+
+	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
+{
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_mgmt_cmd *mcmd;
+	u64 unpacked_lun;
+	int rc;
+
+	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+	if (mcmd == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+		    vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+	memset(mcmd, 0, sizeof(*mcmd));
+
+	mcmd->sess = sess;
+	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+	    sizeof(mcmd->orig_iocb.imm_ntfy));
+
+	unpacked_lun =
+	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
+	mcmd->reset_count = ha->base_qpair->chip_reset;
+	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
+	mcmd->qpair = ha->base_qpair;
+
+	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
+	    le16_to_cpu(iocb->u.isp2x.seq_id));
+	if (rc != 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+		    vha->vp_idx, rc);
+		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess;
+	int loop_id;
+	unsigned long flags;
+
+	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	if (sess == NULL) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+		    "qla_target(%d): task abort for unexisting "
+		    "session\n", vha->vp_idx);
+		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
+		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+	}
+
+	return __qlt_abort_task(vha, iocb, sess);
+}
+
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+	if (rc != MBS_COMMAND_COMPLETE) {
+		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+			"%s: se_sess %p / sess %p from"
+			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+			" LOGO failed: %#x\n",
+			__func__,
+			fcport->se_sess,
+			fcport,
+			fcport->port_name, fcport->loop_id,
+			fcport->d_id.b.domain, fcport->d_id.b.area,
+			fcport->d_id.b.al_pa, rc);
+	}
+
+	fcport->logout_completed = 1;
+}
+
+/*
+* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+*
+* Schedules sessions with matching port_id/loop_id but different wwn for
+* deletion. Returns existing session with matching wwn if present.
+* Null otherwise.
+*/
+struct fc_port *
+qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
+    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
+{
+	struct fc_port *sess = NULL, *other_sess;
+	uint64_t other_wwn;
+
+	*conflict_sess = NULL;
+
+	list_for_each_entry(other_sess, &vha->vp_fcports, list) {
+
+		other_wwn = wwn_to_u64(other_sess->port_name);
+
+		if (wwn == other_wwn) {
+			WARN_ON(sess);
+			sess = other_sess;
+			continue;
+		}
+
+		/* find other sess with nport_id collision */
+		if (port_id.b24 == other_sess->d_id.b24) {
+			if (loop_id != other_sess->loop_id) {
+				ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
+				    "Invalidating sess %p loop_id %d wwn %llx.\n",
+				    other_sess, other_sess->loop_id, other_wwn);
+
+				/*
+				 * logout_on_delete is set by default, but another
+				 * session that has the same s_id/loop_id combo
+				 * might have cleared it when it requested this
+				 * session's deletion, so don't touch it
+				 */
+				qlt_schedule_sess_for_deletion(other_sess, true);
+			} else {
+				/*
+				 * Another WWN used to have our s_id/loop_id:
+				 * kill the session, but don't free the loop_id
+				 */
+				ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
+				    "Invalidating sess %p loop_id %d wwn %llx.\n",
+				    other_sess, other_sess->loop_id, other_wwn);
+
+				other_sess->keep_nport_handle = 1;
+				if (other_sess->disc_state != DSC_DELETED)
+					*conflict_sess = other_sess;
+				qlt_schedule_sess_for_deletion(other_sess,
+				    true);
+			}
+			continue;
+		}
+
+		/* find other sess with nport handle collision */
+		if ((loop_id == other_sess->loop_id) &&
+			(loop_id != FC_NO_LOOP_ID)) {
+			ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
+			       "Invalidating sess %p loop_id %d wwn %llx.\n",
+			       other_sess, other_sess->loop_id, other_wwn);
+
+			/* Same loop_id but different s_id:
+			 * OK to kill and log out */
+			qlt_schedule_sess_for_deletion(other_sess, true);
+		}
+	}
+
+	return sess;
+}
+
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
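+/*
+ * The 24-bit port ID is packed into one u32 key (domain << 16 | area << 8 |
+ * al_pa) and compared with sid_to_key() of each queued ATIO's s_id across
+ * the pending-session, unknown-ATIO and command lists.
+ */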
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+	struct qla_tgt_sess_op *op;
+	struct qla_tgt_cmd *cmd;
+	uint32_t key;
+	int count = 0;
+	unsigned long flags;
+
+	key = (((u32)s_id->b.domain << 16) |
+	       ((u32)s_id->b.area   <<  8) |
+	       ((u32)s_id->b.al_pa));
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
+		if (op_key == key) {
+			op->aborted = true;
+			count++;
+		}
+	}
+
+	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+		if (op_key == key) {
+			op->aborted = true;
+			count++;
+		}
+	}
+
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+		if (cmd_key == key) {
+			cmd->aborted = 1;
+			count++;
+		}
+	}
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	return count;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess = NULL, *conflict_sess = NULL;
+	uint64_t wwn;
+	port_id_t port_id;
+	uint16_t loop_id;
+	uint16_t wd3_lo;
+	int res = 0;
+	struct qlt_plogi_ack_t *pla;
+	unsigned long flags;
+
+	wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+	port_id.b.domain = iocb->u.isp24.port_id[2];
+	port_id.b.area   = iocb->u.isp24.port_id[1];
+	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+	ql_dbg(ql_dbg_disc, vha, 0xf026,
+	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
+	    vha->vp_idx, iocb->u.isp24.port_id[2],
+		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+		   iocb->u.isp24.status_subcode, loop_id,
+		iocb->u.isp24.port_name);
+
+	/* res = 1 means ACK at the end of this routine.
+	 * res = 0 means ACK asynchronously/later.
+	 */
+	switch (iocb->u.isp24.status_subcode) {
+	case ELS_PLOGI:
+
+		/* Mark all stale commands in qla_tgt_wq for deletion */
+		abort_cmds_for_s_id(vha, &port_id);
+
+		if (wwn) {
+			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+			sess = qlt_find_sess_invalidate_other(vha, wwn,
+				port_id, loop_id, &conflict_sess);
+			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+		}
+
+		if (IS_SW_RESV_ADDR(port_id)) {
+			res = 1;
+			break;
+		}
+
+		pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+		if (!pla) {
+			qlt_send_term_imm_notif(vha, iocb, 1);
+			break;
+		}
+
+		res = 0;
+
+		if (conflict_sess) {
+			conflict_sess->login_gen++;
+			qlt_plogi_ack_link(vha, pla, conflict_sess,
+				QLT_PLOGI_LINK_CONFLICT);
+		}
+
+		if (!sess) {
+			pla->ref_count++;
+			qla24xx_post_newsess_work(vha, &port_id,
+				iocb->u.isp24.port_name, pla);
+			res = 0;
+			break;
+		}
+
+		qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
+		sess->fw_login_state = DSC_LS_PLOGI_PEND;
+		sess->d_id = port_id;
+		sess->login_gen++;
+
+		switch (sess->disc_state) {
+		case DSC_DELETED:
+			qlt_plogi_ack_unref(vha, pla);
+			break;
+
+		default:
+			/*
+			 * Under normal circumstances we want to release nport handle
+			 * during LOGO process to avoid nport handle leaks inside FW.
+			 * The exception is when LOGO is done while another PLOGI with
+			 * the same nport handle is waiting as might be the case here.
+			 * Note: there is always a possibility of a race where session
+			 * deletion has already started for other reasons (e.g. ACL
+			 * removal) and now PLOGI arrives:
+			 * 1. if PLOGI arrived in FW after nport handle has been freed,
+			 *    FW must have assigned this PLOGI a new/same handle and we
+			 *    can proceed ACK'ing it as usual when session deletion
+			 *    completes.
+			 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+			 *    bit reached it, the handle has now been released. We'll
+			 *    get an error when we ACK this PLOGI. Nothing will be sent
+			 *    back to initiator. Initiator should eventually retry
+			 *    PLOGI and situation will correct itself.
+			 */
+			sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+			   (sess->d_id.b24 == port_id.b24));
+
+			ql_dbg(ql_dbg_disc, vha, 0x20f9,
+			    "%s %d %8phC post del sess\n",
+			    __func__, __LINE__, sess->port_name);
+
+
+			qlt_schedule_sess_for_deletion_lock(sess);
+			break;
+		}
+
+		break;
+
+	case ELS_PRLI:
+		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+		if (wwn) {
+			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
+				loop_id, &conflict_sess);
+			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+		}
+
+		if (conflict_sess) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+			    "PRLI with conflicting sess %p port %8phC\n",
+			    conflict_sess, conflict_sess->port_name);
+			qlt_send_term_imm_notif(vha, iocb, 1);
+			res = 0;
+			break;
+		}
+
+		if (sess != NULL) {
+			if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+			    sess->fw_login_state != DSC_LS_PLOGI_COMP) {
+				/*
+				 * An impatient initiator sent PRLI before the
+				 * last PLOGI could finish. Force it to retry
+				 * while the previous one completes.
+				 */
+				ql_log(ql_log_warn, sess->vha, 0xf095,
+				    "sess %p PRLI received, before plogi ack.\n",
+				    sess);
+				qlt_send_term_imm_notif(vha, iocb, 1);
+				res = 0;
+				break;
+			}
+
+			/*
+			 * This shouldn't happen under normal circumstances,
+			 * since we have deleted the old session during PLOGI
+			 */
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+			    sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+			sess->local = 0;
+			sess->loop_id = loop_id;
+			sess->d_id = port_id;
+			sess->fw_login_state = DSC_LS_PRLI_PEND;
+
+			if (wd3_lo & BIT_7)
+				sess->conf_compl_supported = 1;
+
+			if ((wd3_lo & BIT_4) == 0)
+				sess->port_type = FCT_INITIATOR;
+			else
+				sess->port_type = FCT_TARGET;
+		}
+		res = 1; /* send notify ack */
+
+		/* Make session global (not used in fabric mode) */
+		if (ha->current_topology != ISP_CFG_F) {
+			if (sess) {
+				ql_dbg(ql_dbg_disc, vha, 0x20fa,
+				    "%s %d %8phC post nack\n",
+				    __func__, __LINE__, sess->port_name);
+				qla24xx_post_nack_work(vha, sess, iocb,
+					SRB_NACK_PRLI);
+				res = 0;
+			} else {
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+		} else {
+			if (sess) {
+				ql_dbg(ql_dbg_disc, vha, 0x20fb,
+				    "%s %d %8phC post nack\n",
+				    __func__, __LINE__, sess->port_name);
+				qla24xx_post_nack_work(vha, sess, iocb,
+					SRB_NACK_PRLI);
+				res = 0;
+			}
+		}
+		break;
+
+	case ELS_TPRLO:
+		if (le16_to_cpu(iocb->u.isp24.flags) &
+			NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
+			loop_id = 0xFFFF;
+			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
+			res = 1;
+			break;
+		}
+		/* fall through */
+	case ELS_LOGO:
+	case ELS_PRLO:
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+		if (sess) {
+			sess->login_gen++;
+			sess->fw_login_state = DSC_LS_LOGO_PEND;
+			sess->logo_ack_needed = 1;
+			memcpy(sess->iocb, iocb, IOCB_SIZE);
+		}
+
+		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+
+		ql_dbg(ql_dbg_disc, vha, 0x20fc,
+		    "%s: logo %llx res %d sess %p ",
+		    __func__, wwn, res, sess);
+		if (res == 0) {
+			/*
+			 * The cmd went to the upper layer; see
+			 * qlt_xmit_tm_rsp() for the LOGO ACK & session delete.
+			 */
+			BUG_ON(!sess);
+			res = 0;
+		} else {
+			/* cmd did not go to upper layer. */
+			if (sess) {
+				qlt_schedule_sess_for_deletion_lock(sess);
+				res = 0;
+			}
+			/* else the LOGO will be ACKed */
+		}
+		break;
+	case ELS_PDISC:
+	case ELS_ADISC:
+	{
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+		if (tgt->link_reinit_iocb_pending) {
+			qlt_send_notify_ack(ha->base_qpair,
+			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
+			tgt->link_reinit_iocb_pending = 0;
+		}
+
+		sess = qla2x00_find_fcport_by_wwpn(vha,
+		    iocb->u.isp24.port_name, 1);
+		if (sess) {
+			ql_dbg(ql_dbg_disc, vha, 0x20fd,
+				"sess %p lid %d|%d DS %d LS %d\n",
+				sess, sess->loop_id, loop_id,
+				sess->disc_state, sess->fw_login_state);
+		}
+
+		res = 1; /* send notify ack */
+		break;
+	}
+
+	case ELS_FLOGI:	/* should never happen */
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+		    "qla_target(%d): Unsupported ELS command %x "
+		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
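+/*
+ * Dispatch an immediate-notify IOCB by its status code.  Handlers return 0
+ * when they will ACK the notification asynchronously; otherwise a
+ * NOTIFY_ACK is sent from here before returning.
+ */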
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t add_flags = 0;
+	int send_notify_ack = 1;
+	uint16_t status;
+
+	status = le16_to_cpu(iocb->u.isp2x.status);
+	switch (status) {
+	case IMM_NTFY_LIP_RESET:
+	{
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+		    iocb->u.isp24.status_subcode);
+
+		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+			send_notify_ack = 0;
+		break;
+	}
+
+	case IMM_NTFY_LIP_LINK_REINIT:
+	{
+		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+		    "qla_target(%d): LINK REINIT (loop %#x, "
+		    "subcode %x)\n", vha->vp_idx,
+		    le16_to_cpu(iocb->u.isp24.nport_handle),
+		    iocb->u.isp24.status_subcode);
+		if (tgt->link_reinit_iocb_pending) {
+			qlt_send_notify_ack(ha->base_qpair,
+			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
+		}
+		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+		tgt->link_reinit_iocb_pending = 1;
+		/*
+		 * QLogic requires waiting after LINK REINIT for possible
+		 * PDISC or ADISC ELS commands
+		 */
+		send_notify_ack = 0;
+		break;
+	}
+
+	case IMM_NTFY_PORT_LOGOUT:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+		    "qla_target(%d): Port logout (loop "
+		    "%#x, subcode %x)\n", vha->vp_idx,
+		    le16_to_cpu(iocb->u.isp24.nport_handle),
+		    iocb->u.isp24.status_subcode);
+
+		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+			send_notify_ack = 0;
+		/* The sessions will be cleared in the callback, if needed */
+		break;
+
+	case IMM_NTFY_GLBL_TPRLO:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+			send_notify_ack = 0;
+		/* The sessions will be cleared in the callback, if needed */
+		break;
+
+	case IMM_NTFY_PORT_CONFIG:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+		    status);
+		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+			send_notify_ack = 0;
+		/* The sessions will be cleared in the callback, if needed */
+		break;
+
+	case IMM_NTFY_GLBL_LOGO:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+		    "qla_target(%d): Link failure detected\n",
+		    vha->vp_idx);
+		/* I_T nexus loss */
+		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_IOCB_OVERFLOW:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+		    "qla_target(%d): Cannot provide requested "
+		    "capability (IOCB overflowed the immediate notify "
+		    "resource count)\n", vha->vp_idx);
+		break;
+
+	case IMM_NTFY_ABORT_TASK:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+		    "qla_target(%d): Abort Task (S %08x I %#x -> "
+		    "L %#x)\n", vha->vp_idx,
+		    le16_to_cpu(iocb->u.isp2x.seq_id),
+		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+		    le16_to_cpu(iocb->u.isp2x.lun));
+		if (qlt_abort_task(vha, iocb) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_RESOURCE:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+		    "qla_target(%d): Out of resources, host %ld\n",
+		    vha->vp_idx, vha->host_no);
+		break;
+
+	case IMM_NTFY_MSG_RX:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+		    "qla_target(%d): Immediate notify task %x\n",
+		    vha->vp_idx, iocb->u.isp2x.task_flags);
+		if (qlt_handle_task_mgmt(vha, iocb) == 0)
+			send_notify_ack = 0;
+		break;
+
+	case IMM_NTFY_ELS:
+		if (qlt_24xx_handle_els(vha, iocb) == 0)
+			send_notify_ack = 0;
+		break;
+	default:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+		    "qla_target(%d): Received unknown immediate "
+		    "notify status %x\n", vha->vp_idx, status);
+		break;
+	}
+
+	if (send_notify_ack)
+		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
+		    0, 0);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
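+/*
+ * The BUSY reply is built as a status-mode-1 CTIO7 with SEND_STATUS and
+ * DONT_RET_CTIO set, posted under QLA_TGT_SKIP_HANDLE so that no completion
+ * has to be matched back to a driver command.
+ */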
+static int __qlt_send_busy(struct qla_qpair *qpair,
+	struct atio_from_isp *atio, uint16_t status)
+{
+	struct scsi_qla_host *vha = qpair->vha;
+	struct ctio7_to_24xx *ctio24;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	struct fc_port *sess = NULL;
+	unsigned long flags;
+	u16 temp;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+	    atio->u.isp24.fcp_hdr.s_id);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+	if (!sess) {
+		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
+		return 0;
+	}
+	/* Sending a marker isn't necessary, since we are called from the ISR */
+
+	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
+	if (!pkt) {
+		ql_dbg(ql_dbg_io, vha, 0x3063,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet", vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+
+	qpair->tgt_counters.num_q_full_sent++;
+	pkt->entry_count = 1;
+	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+	ctio24 = (struct ctio7_to_24xx *)pkt;
+	ctio24->entry_type = CTIO_TYPE7;
+	ctio24->nport_handle = sess->loop_id;
+	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->vp_index = vha->vp_idx;
+	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+	temp = (atio->u.isp24.attr << 9) |
+		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+		CTIO7_FLAGS_DONT_RET_CTIO;
+	ctio24->u.status1.flags = cpu_to_le16(temp);
+	/*
+	 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to
+	 * retry it if explicit confirmation is used.
+	 */
+	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	ctio24->u.status1.scsi_status = cpu_to_le16(status);
+	/* Memory Barrier */
+	wmb();
+	if (qpair->reqq_start_iocbs)
+		qpair->reqq_start_iocbs(qpair);
+	else
+		qla2x00_start_iocbs(vha, qpair->req);
+	return 0;
+}
+
+/*
+ * This routine is used to allocate a command for either a QFull condition
+ * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
+ * out previously.
+ */
+static void
+qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status, int qfull)
+{
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess;
+	struct se_session *se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+	unsigned long flags;
+
+	if (unlikely(tgt->tgt_stop)) {
+		ql_dbg(ql_dbg_io, vha, 0x300a,
+			"New command while device %p is shutting down\n", tgt);
+		return;
+	}
+
+	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
+		vha->hw->tgt.num_qfull_cmds_dropped++;
+		if (vha->hw->tgt.num_qfull_cmds_dropped >
+			vha->qla_stats.stat_max_qfull_cmds_dropped)
+			vha->qla_stats.stat_max_qfull_cmds_dropped =
+				vha->hw->tgt.num_qfull_cmds_dropped;
+
+		ql_dbg(ql_dbg_io, vha, 0x3068,
+			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
+			vha->vp_idx, __func__,
+			vha->hw->tgt.num_qfull_cmds_dropped);
+
+		qlt_chk_exch_leak_thresh_hold(vha);
+		return;
+	}
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id
+		(vha, atio->u.isp24.fcp_hdr.s_id);
+	if (!sess)
+		return;
+
+	se_sess = sess->se_sess;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return;
+
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	if (!cmd) {
+		ql_dbg(ql_dbg_io, vha, 0x3009,
+			"qla_target(%d): %s: Allocation of cmd failed\n",
+			vha->vp_idx, __func__);
+
+		vha->hw->tgt.num_qfull_cmds_dropped++;
+		if (vha->hw->tgt.num_qfull_cmds_dropped >
+			vha->qla_stats.stat_max_qfull_cmds_dropped)
+			vha->qla_stats.stat_max_qfull_cmds_dropped =
+				vha->hw->tgt.num_qfull_cmds_dropped;
+
+		qlt_chk_exch_leak_thresh_hold(vha);
+		return;
+	}
+
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+	qlt_incr_num_pend_cmds(vha);
+	INIT_LIST_HEAD(&cmd->cmd_list);
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	cmd->vha = vha;
+	cmd->reset_count = ha->base_qpair->chip_reset;
+	cmd->q_full = 1;
+	cmd->qpair = ha->base_qpair;
+
+	if (qfull) {
+		/* NOTE: borrowing the state field to carry the status */
+		cmd->state = status;
+	} else
+		cmd->term_exchg = 1;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
+
+	vha->hw->tgt.num_qfull_cmds_alloc++;
+	if (vha->hw->tgt.num_qfull_cmds_alloc >
+		vha->qla_stats.stat_max_qfull_cmds_alloc)
+		vha->qla_stats.stat_max_qfull_cmds_alloc =
+			vha->hw->tgt.num_qfull_cmds_alloc;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
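+/*
+ * Drain the q_full_list: re-send BUSY for q_full entries (cmd->state still
+ * carries the borrowed SCSI status) and re-terminate term_exchg entries.
+ * Successfully sent entries are moved to a local free_list and released
+ * outside the qpair lock; on -ENOMEM the remainder is spliced back.
+ */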
+int
+qlt_free_qfull_cmds(struct qla_qpair *qpair)
+{
+	struct scsi_qla_host *vha = qpair->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	struct qla_tgt_cmd *cmd, *tcmd;
+	struct list_head free_list, q_full_list;
+	int rc = 0;
+
+	if (list_empty(&ha->tgt.q_full_list))
+		return 0;
+
+	INIT_LIST_HEAD(&free_list);
+	INIT_LIST_HEAD(&q_full_list);
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+	if (list_empty(&ha->tgt.q_full_list)) {
+		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+		return 0;
+	}
+
+	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
+		if (cmd->q_full)
+			/* cmd->state is a borrowed field to hold status */
+			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
+		else if (cmd->term_exchg)
+			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
+
+		if (rc == -ENOMEM)
+			break;
+
+		if (cmd->q_full)
+			ql_dbg(ql_dbg_io, vha, 0x3006,
+			    "%s: busy sent for ox_id[%04x]\n", __func__,
+			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+		else if (cmd->term_exchg)
+			ql_dbg(ql_dbg_io, vha, 0x3007,
+			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
+			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+		else
+			ql_dbg(ql_dbg_io, vha, 0x3008,
+			    "%s: Unexpected cmd in QFull list %p\n", __func__,
+			    cmd);
+
+		list_del(&cmd->cmd_list);
+		list_add_tail(&cmd->cmd_list, &free_list);
+
+		/* piggy-back on the qpair lock (hardware_lock) for protection */
+		vha->hw->tgt.num_qfull_cmds_alloc--;
+	}
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+	cmd = NULL;
+
+	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+		list_del(&cmd->cmd_list);
+		/* This cmd was never sent to TCM.  There is no need
+		 * to schedule free or call free_cmd
+		 */
+		qlt_free_cmd(cmd);
+	}
+
+	if (!list_empty(&q_full_list)) {
+		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
+		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+	}
+
+	return rc;
+}
+
+static void
+qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
+    uint16_t status)
+{
+	int rc = 0;
+	struct scsi_qla_host *vha = qpair->vha;
+
+	rc = __qlt_send_busy(qpair, atio, status);
+	if (rc == -ENOMEM)
+		qlt_alloc_qfull_cmd(vha, atio, status, 1);
+}
+
+static int
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+	struct atio_from_isp *atio, uint8_t ha_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t status;
+	unsigned long flags;
+
+	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+		return 0;
+
+	if (!ha_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+	status = temp_sam_status;
+	qlt_send_busy(qpair, atio, status);
+	if (!ha_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return 1;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint8_t ha_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	int rc;
+	unsigned long flags;
+
+	if (unlikely(tgt == NULL)) {
+		ql_dbg(ql_dbg_tgt, vha, 0x3064,
+		    "ATIO pkt, but no tgt (ha %p)", ha);
+		return;
+	}
+	/*
+	 * In tgt_stop mode we should also allow all requests to pass;
+	 * otherwise, some commands can get stuck.
+	 */
+
+	tgt->atio_irq_cmd_count++;
+
+	switch (atio->u.raw.entry_type) {
+	case ATIO_TYPE7:
+		if (unlikely(atio->u.isp24.exchange_addr ==
+		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+			ql_dbg(ql_dbg_io, vha, 0x3065,
+			    "qla_target(%d): ATIO_TYPE7 "
+			    "received with UNKNOWN exchange address, "
+			    "sending QUEUE_FULL\n", vha->vp_idx);
+			if (!ha_locked)
+				spin_lock_irqsave(&ha->hardware_lock, flags);
+			qlt_send_busy(ha->base_qpair, atio,
+			    SAM_STAT_TASK_SET_FULL);
+			if (!ha_locked)
+				spin_unlock_irqrestore(&ha->hardware_lock,
+				    flags);
+			break;
+		}
+
+		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
+			    atio, ha_locked);
+			if (rc != 0) {
+				tgt->atio_irq_cmd_count--;
+				return;
+			}
+			rc = qlt_handle_cmd_for_atio(vha, atio);
+		} else {
+			rc = qlt_handle_task_mgmt(vha, atio);
+		}
+		if (unlikely(rc != 0)) {
+			if (rc == -ESRCH) {
+				if (!ha_locked)
+					spin_lock_irqsave(&ha->hardware_lock,
+					    flags);
+
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+				qlt_send_busy(ha->base_qpair, atio,
+				    SAM_STAT_BUSY);
+#else
+				qlt_send_term_exchange(ha->base_qpair, NULL,
+				    atio, 1, 0);
+#endif
+				if (!ha_locked)
+					spin_unlock_irqrestore(
+					    &ha->hardware_lock, flags);
+			} else {
+				if (tgt->tgt_stop) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe059,
+					    "qla_target: Unable to send "
+					    "command to target for req, "
+					    "ignoring.\n");
+				} else {
+					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+					    "qla_target(%d): Unable to send "
+					    "command to target, sending BUSY "
+					    "status.\n", vha->vp_idx);
+					if (!ha_locked)
+						spin_lock_irqsave(
+						    &ha->hardware_lock, flags);
+					qlt_send_busy(ha->base_qpair,
+					    atio, SAM_STAT_BUSY);
+					if (!ha_locked)
+						spin_unlock_irqrestore(
+						    &ha->hardware_lock, flags);
+				}
+			}
+		}
+		break;
+
+	case IMMED_NOTIFY_TYPE:
+	{
+		if (unlikely(atio->u.isp2x.entry_status != 0)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+			    "qla_target(%d): Received ATIO packet %x "
+			    "with error status %x\n", vha->vp_idx,
+			    atio->u.raw.entry_type,
+			    atio->u.isp2x.entry_status);
+			break;
+		}
+		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+
+		if (!ha_locked)
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+		if (!ha_locked)
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		break;
+	}
+
+	default:
+		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+		    "qla_target(%d): Received unknown ATIO atio "
+		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+		break;
+	}
+
+	tgt->atio_irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
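+/*
+ * Response-queue demultiplexer: CTIO completions are handed to
+ * qlt_do_ctio_completion() with the IOCB's entry_status folded into the
+ * upper 16 bits of the status word; NOTIFY_ACK and ABTS responses are
+ * matched against the tgt->*_expected counters.
+ */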
+static void qlt_response_pkt(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, response_t *pkt)
+{
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+	if (unlikely(tgt == NULL)) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
+		    vha->vp_idx, pkt->entry_type, vha->hw);
+		return;
+	}
+
+	/*
+	 * In tgt_stop mode we should also allow all requests to pass;
+	 * otherwise, some commands can get stuck.
+	 */
+
+	switch (pkt->entry_type) {
+	case CTIO_CRC2:
+	case CTIO_TYPE7:
+	{
+		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+		qlt_do_ctio_completion(vha, rsp, entry->handle,
+		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+		    entry);
+		break;
+	}
+
+	case ACCEPT_TGT_IO_TYPE:
+	{
+		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+		int rc;
+		if (atio->u.isp2x.status !=
+		    cpu_to_le16(ATIO_CDB_VALID)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+			    "qla_target(%d): ATIO with error "
+			    "status %x received\n", vha->vp_idx,
+			    le16_to_cpu(atio->u.isp2x.status));
+			break;
+		}
+
+		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
+		if (rc != 0)
+			return;
+
+		rc = qlt_handle_cmd_for_atio(vha, atio);
+		if (unlikely(rc != 0)) {
+			if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+				qlt_send_busy(rsp->qpair, atio, 0);
+#else
+				qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
+#endif
+			} else {
+				if (tgt->tgt_stop) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+					    "qla_target: Unable to send "
+					    "command to target, sending TERM "
+					    "EXCHANGE for rsp\n");
+					qlt_send_term_exchange(rsp->qpair, NULL,
+					    atio, 1, 0);
+				} else {
+					ql_dbg(ql_dbg_tgt, vha, 0xe060,
+					    "qla_target(%d): Unable to send "
+					    "command to target, sending BUSY "
+					    "status\n", vha->vp_idx);
+					qlt_send_busy(rsp->qpair, atio, 0);
+				}
+			}
+		}
+	}
+	break;
+
+	case CONTINUE_TGT_IO_TYPE:
+	{
+		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+		qlt_do_ctio_completion(vha, rsp, entry->handle,
+		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+		    entry);
+		break;
+	}
+
+	case CTIO_A64_TYPE:
+	{
+		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+		qlt_do_ctio_completion(vha, rsp, entry->handle,
+		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+		    entry);
+		break;
+	}
+
+	case IMMED_NOTIFY_TYPE:
+		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+		break;
+
+	case NOTIFY_ACK_TYPE:
+		if (tgt->notify_ack_expected > 0) {
+			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+			ql_dbg(ql_dbg_tgt, vha, 0xe036,
+			    "NOTIFY_ACK seq %08x status %x\n",
+			    le16_to_cpu(entry->u.isp2x.seq_id),
+			    le16_to_cpu(entry->u.isp2x.status));
+			tgt->notify_ack_expected--;
+			if (entry->u.isp2x.status !=
+			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+				ql_dbg(ql_dbg_tgt, vha, 0xe061,
+				    "qla_target(%d): NOTIFY_ACK "
+				    "failed %x\n", vha->vp_idx,
+				    le16_to_cpu(entry->u.isp2x.status));
+			}
+		} else {
+			ql_dbg(ql_dbg_tgt, vha, 0xe062,
+			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+			    vha->vp_idx);
+		}
+		break;
+
+	case ABTS_RECV_24XX:
+		ql_dbg(ql_dbg_tgt, vha, 0xe037,
+		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+		break;
+
+	case ABTS_RESP_24XX:
+		if (tgt->abts_resp_expected > 0) {
+			struct abts_resp_from_24xx_fw *entry =
+				(struct abts_resp_from_24xx_fw *)pkt;
+			ql_dbg(ql_dbg_tgt, vha, 0xe038,
+			    "ABTS_RESP_24XX: compl_status %x\n",
+			    entry->compl_status);
+			tgt->abts_resp_expected--;
+			if (le16_to_cpu(entry->compl_status) !=
+			    ABTS_RESP_COMPL_SUCCESS) {
+				if ((entry->error_subcode1 == 0x1E) &&
+				    (entry->error_subcode2 == 0)) {
+					/*
+					 * We've got a race here: the aborted
+					 * exchange was not terminated, i.e.
+					 * the response for the aborted command
+					 * was sent between the time the abort
+					 * request was received and the time it
+					 * was processed.
+					 * Unfortunately, the firmware has a
+					 * silly requirement that all aborted
+					 * exchanges must be explicitly
+					 * terminated, otherwise it refuses to
+					 * send responses for the abort
+					 * requests. So, we have to
+					 * (re)terminate the exchange and retry
+					 * the abort response.
+					 */
+					qlt_24xx_retry_term_exchange(vha,
+					    entry);
+				} else
+					ql_dbg(ql_dbg_tgt, vha, 0xe063,
+					    "qla_target(%d): ABTS_RESP_24XX "
+					    "failed %x (subcode %x:%x)",
+					    vha->vp_idx, entry->compl_status,
+					    entry->error_subcode1,
+					    entry->error_subcode2);
+			}
+		} else {
+			ql_dbg(ql_dbg_tgt, vha, 0xe064,
+			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
+			    "received\n", vha->vp_idx);
+		}
+		break;
+
+	default:
+		ql_dbg(ql_dbg_tgt, vha, 0xe065,
+		    "qla_target(%d): Received unknown response pkt "
+		    "type %x\n", vha->vp_idx, pkt->entry_type);
+		break;
+	}
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then
+ * reacquire.
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+	uint16_t *mailbox)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	int login_code;
+
+	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
+		return;
+
+	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+	    IS_QLA2100(ha))
+		return;
+	/*
+	 * In tgt_stop mode we should also allow all requests to pass.
+	 * Otherwise, some commands can get stuck.
+	 */
+
+	switch (code) {
+	case MBA_RESET:			/* Reset */
+	case MBA_SYSTEM_ERR:		/* System Error */
+	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
+	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+		    "qla_target(%d): System error async event %#x "
+		    "occurred", vha->vp_idx, code);
+		break;
+	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case MBA_LOOP_UP:
+	{
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+		    "qla_target(%d): Async LOOP_UP occurred "
+		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+		if (tgt->link_reinit_iocb_pending) {
+			qlt_send_notify_ack(ha->base_qpair,
+			    (void *)&tgt->link_reinit_iocb,
+			    0, 0, 0, 0, 0, 0);
+			tgt->link_reinit_iocb_pending = 0;
+		}
+		break;
+	}
+
+	case MBA_LIP_OCCURRED:
+	case MBA_LOOP_DOWN:
+	case MBA_LIP_RESET:
+	case MBA_RSCN_UPDATE:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+		    "qla_target(%d): Async event %#x occurred "
+		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+		break;
+
+	case MBA_REJECTED_FCP_CMD:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
+		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
+		    vha->vp_idx,
+		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+		if (le16_to_cpu(mailbox[3]) == 1) {
+			/* FCP command rejected due to exchange starvation. */
+			vha->hw->exch_starvation++;
+			if (vha->hw->exch_starvation > 5) {
+				ql_log(ql_log_warn, vha, 0xd03a,
+				    "Exchange starvation. Resetting RISC\n");
+
+				vha->hw->exch_starvation = 0;
+				if (IS_P3P_TYPE(vha->hw))
+					set_bit(FCOE_CTX_RESET_NEEDED,
+					    &vha->dpc_flags);
+				else
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+		}
+		break;
+
+	case MBA_PORT_UPDATE:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+		    "qla_target(%d): Port update async event %#x "
+		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
+		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+		login_code = le16_to_cpu(mailbox[2]);
+		if (login_code == 0x4) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+			    "Async MB 2: Got PLOGI Complete\n");
+			vha->hw->exch_starvation = 0;
+		} else if (login_code == 0x7)
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+			    "Async MB 2: Port Logged Out\n");
+		break;
+	default:
+		break;
+	}
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+	uint16_t loop_id)
+{
+	fc_port_t *fcport, *tfcp, *del;
+	int rc;
+	unsigned long flags;
+	u8 newfcport = 0;
+
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+		    "qla_target(%d): Allocation of tmp FC port failed",
+		    vha->vp_idx);
+		return NULL;
+	}
+
+	fcport->loop_id = loop_id;
+
+	rc = qla24xx_gpdb_wait(vha, fcport, 0);
+	if (rc != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+		    "qla_target(%d): Failed to retrieve fcport "
+		    "information -- get_port_database() returned %x "
+		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+		kfree(fcport);
+		return NULL;
+	}
+
+	del = NULL;
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
+
+	if (tfcp) {
+		tfcp->d_id = fcport->d_id;
+		tfcp->port_type = fcport->port_type;
+		tfcp->supported_classes = fcport->supported_classes;
+		tfcp->flags |= fcport->flags;
+		tfcp->scan_state = QLA_FCPORT_FOUND;
+
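+		/*
+		 * An fcport with this WWPN already exists: update it in place
+		 * and free the temporary one (via 'del') after the lock is
+		 * dropped.
+		 */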
+		del = fcport;
+		fcport = tfcp;
+	} else {
+		if (vha->hw->current_topology == ISP_CFG_F)
+			fcport->flags |= FCF_FABRIC_DEVICE;
+
+		list_add_tail(&fcport->list, &vha->vp_fcports);
+		if (!IS_SW_RESV_ADDR(fcport->d_id))
+			vha->fcport_count++;
+		fcport->login_gen++;
+		fcport->disc_state = DSC_LOGIN_COMPLETE;
+		fcport->login_succ = 1;
+		newfcport = 1;
+	}
+
+	fcport->deleted = 0;
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	switch (vha->host->active_mode) {
+	case MODE_INITIATOR:
+	case MODE_DUAL:
+		if (newfcport) {
+			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
+				ql_dbg(ql_dbg_disc, vha, 0x20fe,
+				   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
+				qla24xx_post_upd_fcport_work(vha, fcport);
+			} else {
+				ql_dbg(ql_dbg_disc, vha, 0x20ff,
+				   "%s %d %8phC post gpsc fcp_cnt %d\n",
+				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
+				qla24xx_post_gpsc_work(vha, fcport);
+			}
+		}
+		break;
+
+	case MODE_TARGET:
+	default:
+		break;
+	}
+	if (del)
+		qla2x00_free_fcport(del);
+
+	return fcport;
+}
+
+/* Acquires and releases vha->vha_tgt.tgt_mutex internally */
+static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
+	uint8_t *s_id)
+{
+	struct fc_port *sess = NULL;
+	fc_port_t *fcport = NULL;
+	int rc, global_resets;
+	uint16_t loop_id = 0;
+
+	if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
+		/*
+		 * This is Domain Controller, so it should be
+		 * OK to drop SCSI commands from it.
+		 */
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+		    "Unable to find initiator with S_ID %x:%x:%x",
+		    s_id[0], s_id[1], s_id[2]);
+		return NULL;
+	}
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+
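+	/*
+	 * Snapshot the global reset counter; if an ISP reset happens while
+	 * the firmware is queried below, the retrieved data may be stale and
+	 * the lookup is retried.
+	 */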
+retry:
+	global_resets =
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+
+	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+	if (rc != 0) {
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+		ql_log(ql_log_info, vha, 0xf071,
+		    "qla_target(%d): Unable to find "
+		    "initiator with S_ID %x:%x:%x",
+		    vha->vp_idx, s_id[0], s_id[1],
+		    s_id[2]);
+
+		if (rc == -ENOENT) {
+			qlt_port_logo_t logo;
+			sid_to_portid(s_id, &logo.id);
+			logo.cmd_count = 1;
+			qlt_send_first_logo(vha, &logo);
+		}
+
+		return NULL;
+	}
+
+	fcport = qlt_get_port_database(vha, loop_id);
+	if (!fcport) {
+		mutex_unlock(&vha->vha_tgt.tgt_mutex);
+		return NULL;
+	}
+
+	if (global_resets !=
+	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+		    "qla_target(%d): global reset during session discovery "
+		    "(counter was %d, new %d), retrying", vha->vp_idx,
+		    global_resets,
+		    atomic_read(&vha->vha_tgt.
+			qla_tgt->tgt_global_resets_count));
+		goto retry;
+	}
+
+	sess = qlt_create_sess(vha, fcport, true);
+
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+	struct qla_tgt_sess_work_param *prm)
+{
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess = NULL;
+	unsigned long flags = 0, flags2 = 0;
+	uint8_t s_id[3];
+	int rc;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
+
+	if (tgt->tgt_stop)
+		goto out_term2;
+
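+	/*
+	 * fcp_hdr_le carries the S_ID in little-endian byte order; reverse it
+	 * into wire order for the session lookup.
+	 */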
+	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+	if (!sess) {
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
+		sess = qlt_make_local_sess(vha, s_id);
+		/* sess has got an extra creation ref */
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
+		if (!sess)
+			goto out_term2;
+	} else {
+		if (sess->deleted) {
+			sess = NULL;
+			goto out_term2;
+		}
+
+		if (!kref_get_unless_zero(&sess->sess_kref)) {
+			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
+			    "%s: kref_get fail %8phC\n",
+			     __func__, sess->port_name);
+			sess = NULL;
+			goto out_term2;
+		}
+	}
+
+	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
+	if (rc != 0)
+		goto out_term;
+	return;
+
+out_term2:
+	if (sess)
+		ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
+out_term:
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
+	    FCP_TMF_REJECTED, false);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_tmr_work(struct qla_tgt *tgt,
+	struct qla_tgt_sess_work_param *prm)
+{
+	struct atio_from_isp *a = &prm->tm_iocb2;
+	struct scsi_qla_host *vha = tgt->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct fc_port *sess = NULL;
+	unsigned long flags;
+	uint8_t *s_id = NULL; /* to hide compiler warnings */
+	int rc;
+	u64 unpacked_lun;
+	int fn;
+	void *iocb;
+
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+
+	if (tgt->tgt_stop)
+		goto out_term2;
+
+	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+	if (!sess) {
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+		sess = qlt_make_local_sess(vha, s_id);
+		/* sess has got an extra creation ref */
+
+		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+		if (!sess)
+			goto out_term2;
+	} else {
+		if (sess->deleted) {
+			sess = NULL;
+			goto out_term2;
+		}
+
+		if (!kref_get_unless_zero(&sess->sess_kref)) {
+			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
+			    "%s: kref_get fail %8phC\n",
+			     __func__, sess->port_name);
+			sess = NULL;
+			goto out_term2;
+		}
+	}
+
+	iocb = a;
+	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+	unpacked_lun =
+	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
+
+	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	if (rc != 0)
+		goto out_term;
+	return;
+
+out_term2:
+	if (sess)
+		ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+out_term:
+	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
+}
+
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+	struct scsi_qla_host *vha = tgt->vha;
+	unsigned long flags;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+	spin_lock_irqsave(&tgt->sess_work_lock, flags);
+	while (!list_empty(&tgt->sess_works_list)) {
+		struct qla_tgt_sess_work_param *prm = list_entry(
+		    tgt->sess_works_list.next, typeof(*prm),
+		    sess_works_list_entry);
+
+		/*
+		 * This work can be scheduled on several CPUs at a time, so we
+		 * must delete the entry to eliminate double processing.
+		 */
+		list_del(&prm->sess_works_list_entry);
+
+		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+		switch (prm->type) {
+		case QLA_TGT_SESS_WORK_ABORT:
+			qlt_abort_work(tgt, prm);
+			break;
+		case QLA_TGT_SESS_WORK_TM:
+			qlt_tmr_work(tgt, prm);
+			break;
+		default:
+			BUG_ON(1);
+			break;
+		}
+
+		spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+		kfree(prm);
+	}
+	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+	struct qla_tgt *tgt;
+	int rc, i;
+	struct qla_qpair_hint *h;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return 0;
+
+	if (!IS_TGT_MODE_CAPABLE(ha)) {
+		ql_log(ql_log_warn, base_vha, 0xe070,
+		    "This adapter does not support target mode.\n");
+		return 0;
+	}
+
+	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
+
+	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
+
+	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+	if (!tgt) {
+		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+		    "Unable to allocate struct qla_tgt\n");
+		return -ENOMEM;
+	}
+
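+	/* One qpair hint per hardware queue pair, plus one for the base qpair. */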
+	tgt->qphints = kzalloc((ha->max_qpairs + 1) *
+	    sizeof(struct qla_qpair_hint), GFP_KERNEL);
+	if (!tgt->qphints) {
+		kfree(tgt);
+		ql_log(ql_log_warn, base_vha, 0x0197,
+		    "Unable to allocate qpair hints.\n");
+		return -ENOMEM;
+	}
+
+	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+		base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+	rc = btree_init64(&tgt->lun_qpair_map);
+	if (rc) {
+		kfree(tgt->qphints);
+		kfree(tgt);
+		ql_log(ql_log_info, base_vha, 0x0198,
+			"Unable to initialize lun_qpair_map btree\n");
+		return -EIO;
+	}
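+	/* Hint 0 always maps to the base qpair; the rest follow queue_pair_map[]. */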
+	h = &tgt->qphints[0];
+	h->qpair = ha->base_qpair;
+	INIT_LIST_HEAD(&h->hint_elem);
+	h->cpuid = ha->base_qpair->cpuid;
+	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
+
+	for (i = 0; i < ha->max_qpairs; i++) {
+		unsigned long flags;
+
+		struct qla_qpair *qpair = ha->queue_pair_map[i];
+		h = &tgt->qphints[i + 1];
+		INIT_LIST_HEAD(&h->hint_elem);
+		if (qpair) {
+			h->qpair = qpair;
+			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+			list_add_tail(&h->hint_elem, &qpair->hints_list);
+			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+			h->cpuid = qpair->cpuid;
+		}
+	}
+
+	tgt->ha = ha;
+	tgt->vha = base_vha;
+	init_waitqueue_head(&tgt->waitQ);
+	INIT_LIST_HEAD(&tgt->del_sess_list);
+	spin_lock_init(&tgt->sess_work_lock);
+	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+	INIT_LIST_HEAD(&tgt->sess_works_list);
+	atomic_set(&tgt->tgt_global_resets_count, 0);
+
+	base_vha->vha_tgt.qla_tgt = tgt;
+
+	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+		"qla_target(%d): using 64 Bit PCI addressing",
+		base_vha->vp_idx);
+	/* 3 is reserved */
+	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+
+	mutex_lock(&qla_tgt_mutex);
+	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+	mutex_unlock(&qla_tgt_mutex);
+
+	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+		ha->tgt.tgt_ops->add_target(base_vha);
+
+	return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+	if (!vha->vha_tgt.qla_tgt)
+		return 0;
+
+	if (vha->fc_vport) {
+		qlt_release(vha->vha_tgt.qla_tgt);
+		return 0;
+	}
+
+	/* free left over qfull cmds */
+	qlt_init_term_exchange(vha);
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+	    vha->host_no, ha);
+	qlt_release(vha->vha_tgt.qla_tgt);
+
+	return 0;
+}
+
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+	struct scsi_qla_host *node;
+	u32 key = 0;
+
+	btree_for_each_safe32(&ha->tgt.host_map, key, node)
+		btree_remove32(&ha->tgt.host_map, key);
+
+	btree_destroy32(&ha->tgt.host_map);
+}
+
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+	unsigned char *b)
+{
+	int i;
+
+	pr_debug("qla2xxx HW vha->node_name: ");
+	for (i = 0; i < WWN_SIZE; i++)
+		pr_debug("%02x ", vha->node_name[i]);
+	pr_debug("\n");
+	pr_debug("qla2xxx HW vha->port_name: ");
+	for (i = 0; i < WWN_SIZE; i++)
+		pr_debug("%02x ", vha->port_name[i]);
+	pr_debug("\n");
+
+	pr_debug("qla2xxx passed configfs WWPN: ");
+	put_unaligned_be64(wwpn, b);
+	for (i = 0; i < WWN_SIZE; i++)
+		pr_debug("%02x ", b[i]);
+	pr_debug("\n");
+}
+
+/**
+ * qlt_lport_register - register lport with external module
+ *
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ * @phys_wwpn: physical WWPN of the passed FC target
+ * @npiv_wwpn: NPIV WWPN of the passed FC target
+ * @npiv_wwnn: NPIV WWNN of the passed FC target
+ * @callback: lport initialization callback for tcm_qla2xxx code
+ */
+int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+		       u64 npiv_wwpn, u64 npiv_wwnn,
+		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
+{
+	struct qla_tgt *tgt;
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
+	struct Scsi_Host *host;
+	unsigned long flags;
+	int rc;
+	u8 b[WWN_SIZE];
+
+	mutex_lock(&qla_tgt_mutex);
+	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+		vha = tgt->vha;
+		ha = vha->hw;
+
+		host = vha->host;
+		if (!host)
+			continue;
+
+		if (!(host->hostt->supported_mode & MODE_TARGET))
+			continue;
+
+		spin_lock_irqsave(&ha->hardware_lock, flags);
+		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
+			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+			    host->host_no);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			continue;
+		}
+		if (tgt->tgt_stop) {
+			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+				 host->host_no);
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			continue;
+		}
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		if (!scsi_host_get(host)) {
+			ql_dbg(ql_dbg_tgt, vha, 0xe068,
+			    "Unable to scsi_host_get() for"
+			    " qla2xxx scsi_host\n");
+			continue;
+		}
+		qlt_lport_dump(vha, phys_wwpn, b);
+
+		if (memcmp(vha->port_name, b, WWN_SIZE)) {
+			scsi_host_put(host);
+			continue;
+		}
+		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+		if (rc != 0)
+			scsi_host_put(host);
+
+		mutex_unlock(&qla_tgt_mutex);
+		return rc;
+	}
+	mutex_unlock(&qla_tgt_mutex);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qlt_lport_deregister - deregister lport
+ *
+ * @vha:  Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct Scsi_Host *sh = vha->host;
+	/*
+	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
+	 */
+	vha->vha_tgt.target_lport_ptr = NULL;
+	ha->tgt.tgt_ops = NULL;
+	/*
+	 * Release the Scsi_Host reference for the underlying qla2xxx host
+	 */
+	scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/* Must be called under HW lock */
+static void qlt_set_mode(struct scsi_qla_host *vha)
+{
+	switch (ql2x_ini_mode) {
+	case QLA2XXX_INI_MODE_DISABLED:
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		vha->host->active_mode = MODE_TARGET;
+		break;
+	case QLA2XXX_INI_MODE_ENABLED:
+		vha->host->active_mode = MODE_UNKNOWN;
+		break;
+	case QLA2XXX_INI_MODE_DUAL:
+		vha->host->active_mode = MODE_DUAL;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Must be called under HW lock */
+static void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+	switch (ql2x_ini_mode) {
+	case QLA2XXX_INI_MODE_DISABLED:
+		vha->host->active_mode = MODE_UNKNOWN;
+		break;
+	case QLA2XXX_INI_MODE_EXCLUSIVE:
+		vha->host->active_mode = MODE_INITIATOR;
+		break;
+	case QLA2XXX_INI_MODE_ENABLED:
+	case QLA2XXX_INI_MODE_DUAL:
+		vha->host->active_mode = MODE_INITIATOR;
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * qlt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	unsigned long flags;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	if (!tgt) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe069,
+		    "Unable to locate qla_tgt pointer from"
+		    " struct qla_hw_data\n");
+		dump_stack();
+		return;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	tgt->tgt_stopped = 0;
+	qlt_set_mode(vha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (vha->vp_idx) {
+		qla24xx_disable_vp(vha);
+		qla24xx_enable_vp(vha);
+	} else {
+		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
+		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
+			     QLA_SUCCESS);
+	}
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qlt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+static void qlt_disable_vha(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	unsigned long flags;
+
+	if (!tgt) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+		    "Unable to locate qla_tgt pointer from"
+		    " struct qla_hw_data\n");
+		dump_stack();
+		return;
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_clear_mode(vha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+		ql_dbg(ql_dbg_tgt, vha, 0xe081,
+		       "qla2x00_wait_for_hba_online() failed\n");
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to set up
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+	vha->vha_tgt.qla_tgt = NULL;
+
+	mutex_init(&vha->vha_tgt.tgt_mutex);
+	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
+
+	qlt_clear_mode(vha);
+
+	/*
+	 * NOTE: Currently the value is kept the same for <24xx and
+	 * >=24xx ISPs. If it is necessary to change it,
+	 * the check should be added for specific ISPs,
+	 * assigning the value appropriately.
+	 */
+	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+
+	qlt_add_target(ha, vha);
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+	/*
+	 * FC-4 Feature bit 0 indicates target functionality to the name server.
+	 */
+	if (qla_tgt_mode_enabled(vha)) {
+		ct_req->req.rff_id.fc4_feature = BIT_0;
+	} else if (qla_ini_mode_enabled(vha)) {
+		ct_req->req.rff_id.fc4_feature = BIT_1;
+	} else if (qla_dual_mode_enabled(vha)) {
+		ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+	}
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @vha: SCSI driver HA context
+ *
+ * The beginning of the ATIO ring has the initialization control block
+ * already built by the nvram config routine.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t cnt;
+	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+	if (qla_ini_mode_enabled(vha))
+		return;
+
+	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+		pkt->u.raw.signature = ATIO_PROCESSED;
+		pkt++;
+	}
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @vha: SCSI driver HA context
+ * @ha_locked: non-zero if the caller already holds ha->hardware_lock
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct atio_from_isp *pkt;
+	int cnt, i;
+
+	if (!ha->flags.fw_started)
+		return;
+
+	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
+		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+		cnt = pkt->u.raw.entry_count;
+
+		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+			/*
+			 * This packet is corrupted. The header + payload
+			 * cannot be trusted. There is no point in passing
+			 * it further up.
+			 */
+			ql_log(ql_log_warn, vha, 0xd03c,
+			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+			    pkt->u.isp24.fcp_hdr.s_id,
+			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+			    le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+			adjust_corrupted_atio(pkt);
+			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
+			    ha_locked, 0);
+		} else {
+			qlt_24xx_atio_pkt_all_vps(vha,
+			    (struct atio_from_isp *)pkt, ha_locked);
+		}
+
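+		/*
+		 * A packet can span several ring entries (entry_count); step
+		 * past each consumed entry and mark it as processed.
+		 */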
+		for (i = 0; i < cnt; i++) {
+			ha->tgt.atio_ring_index++;
+			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+				ha->tgt.atio_ring_index = 0;
+				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+			} else
+				ha->tgt.atio_ring_ptr++;
+
+			pkt->u.raw.signature = ATIO_PROCESSED;
+			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+		}
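+		/*
+		 * Order the ATIO_PROCESSED signature writes ahead of the
+		 * ring index register update below.
+		 */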
+		wmb();
+	}
+
+	/* Adjust ring index */
+	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+}
+
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+	if (IS_ATIO_MSIX_CAPABLE(ha)) {
+		struct qla_msix_entry *msix = &ha->msix_entries[2];
+		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+		icb->msix_atio = cpu_to_le16(msix->entry);
+		ql_dbg(ql_dbg_init, vha, 0xf072,
+		    "Registering ICB vector 0x%x for ATIO queue.\n",
+		    msix->entry);
+	}
+}
+
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+		if (!ha->tgt.saved_set) {
+			/* We save only once */
+			ha->tgt.saved_exchange_count = nv->exchange_count;
+			ha->tgt.saved_firmware_options_1 =
+			    nv->firmware_options_1;
+			ha->tgt.saved_firmware_options_2 =
+			    nv->firmware_options_2;
+			ha->tgt.saved_firmware_options_3 =
+			    nv->firmware_options_3;
+			ha->tgt.saved_set = 1;
+		}
+
+		if (qla_tgt_mode_enabled(vha))
+			nv->exchange_count = cpu_to_le16(0xFFFF);
+		else			/* dual */
+			nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+
+		/* Enable target mode */
+		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
+
+		/* Disable ini mode, if requested */
+		if (qla_tgt_mode_enabled(vha))
+			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
+
+		/* Disable Full Login after LIP */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
+		/* Enable initial LIP */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+		if (ql2xtgt_tape_enable)
+			/* Enable FC Tape support */
+			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		else
+			/* Disable FC Tape support */
+			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
+		/* Disable Full Login after LIP */
+		nv->host_p &= cpu_to_le32(~BIT_10);
+
+		/*
+		 * clear BIT 15 explicitly as we have seen at least
+		 * a couple of instances where this was set and this
+		 * was causing the firmware to not be initialized.
+		 */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
+		/* Enable target PRLI control */
+		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+	} else {
+		if (ha->tgt.saved_set) {
+			nv->exchange_count = ha->tgt.saved_exchange_count;
+			nv->firmware_options_1 =
+			    ha->tgt.saved_firmware_options_1;
+			nv->firmware_options_2 =
+			    ha->tgt.saved_firmware_options_2;
+			nv->firmware_options_3 =
+			    ha->tgt.saved_firmware_options_3;
+		}
+		return;
+	}
+
+	if (ha->base_qpair->enable_class_2) {
+		if (vha->flags.init_done)
+			fc_host_supported_classes(vha->host) =
+				FC_COS_CLASS2 | FC_COS_CLASS3;
+
+		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
+	} else {
+		if (vha->flags.init_done)
+			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
+	}
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+	struct init_cb_24xx *icb)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if (ha->tgt.node_name_set) {
+		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
+	}
+
+	/* disable ZIO at start time. */
+	if (!vha->flags.init_done) {
+		uint32_t tmp;
+		tmp = le32_to_cpu(icb->firmware_options_2);
+		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		icb->firmware_options_2 = cpu_to_le32(tmp);
+	}
+}
+
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+		if (!ha->tgt.saved_set) {
+			/* We save only once */
+			ha->tgt.saved_exchange_count = nv->exchange_count;
+			ha->tgt.saved_firmware_options_1 =
+			    nv->firmware_options_1;
+			ha->tgt.saved_firmware_options_2 =
+			    nv->firmware_options_2;
+			ha->tgt.saved_firmware_options_3 =
+			    nv->firmware_options_3;
+			ha->tgt.saved_set = 1;
+		}
+
+		if (qla_tgt_mode_enabled(vha))
+			nv->exchange_count = cpu_to_le16(0xFFFF);
+		else			/* dual */
+			nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+
+		/* Enable target mode */
+		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
+
+		/* Disable ini mode, if requested */
+		if (qla_tgt_mode_enabled(vha))
+			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
+		/* Disable Full Login after LIP */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
+		/* Enable initial LIP */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+		/*
+		 * clear BIT 15 explicitly as we have seen at
+		 * least a couple of instances where this was set
+		 * and this was causing the firmware to not be
+		 * initialized.
+		 */
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
+		if (ql2xtgt_tape_enable)
+			/* Enable FC tape support */
+			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		else
+			/* Disable FC tape support */
+			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
+		/* Disable Full Login after LIP */
+		nv->host_p &= cpu_to_le32(~BIT_10);
+		/* Enable target PRLI control */
+		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+	} else {
+		if (ha->tgt.saved_set) {
+			nv->exchange_count = ha->tgt.saved_exchange_count;
+			nv->firmware_options_1 =
+			    ha->tgt.saved_firmware_options_1;
+			nv->firmware_options_2 =
+			    ha->tgt.saved_firmware_options_2;
+			nv->firmware_options_3 =
+			    ha->tgt.saved_firmware_options_3;
+		}
+		return;
+	}
+
+	if (ha->base_qpair->enable_class_2) {
+		if (vha->flags.init_done)
+			fc_host_supported_classes(vha->host) =
+				FC_COS_CLASS2 | FC_COS_CLASS3;
+
+		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
+	} else {
+		if (vha->flags.init_done)
+			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
+	}
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+	struct init_cb_81xx *icb)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if (ha->tgt.node_name_set) {
+		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
+	}
+
+	/* disable ZIO at start time. */
+	if (!vha->flags.init_done) {
+		uint32_t tmp;
+		tmp = le32_to_cpu(icb->firmware_options_2);
+		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
+		icb->firmware_options_2 = cpu_to_le32(tmp);
+	}
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	ha->msix_count += 1; /* For ATIO Q */
+}
+
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+	struct vp_config_entry_24xx *vpmod)
+{
+	/* enable target mode.  Bit5 = 1 => disable */
+	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
+		vpmod->options_idx1 &= ~BIT_5;
+
+	/* Disable ini mode, if requested.  bit4 = 1 => disable */
+	if (qla_tgt_mode_enabled(vha))
+		vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+	int rc;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if  (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+	} else {
+		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+	}
+
+	mutex_init(&base_vha->vha_tgt.tgt_mutex);
+	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
+
+	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
+	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
+	    qlt_unknown_atio_work_fn);
+
+	qlt_clear_mode(base_vha);
+
+	rc = btree_init32(&ha->tgt.host_map);
+	if (rc)
+		ql_log(ql_log_info, base_vha, 0xd03d,
+		    "Unable to initialize ha->host_map btree\n");
+
+	qlt_update_vp_map(base_vha, SET_VP_IDX);
+}
+
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+	struct rsp_que *rsp;
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	unsigned long flags;
+
+	rsp = (struct rsp_que *) dev_id;
+	ha = rsp->hw;
+	vha = pci_get_drvdata(ha->pdev);
+
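+	/* The ATIO queue has its own lock; the hardware lock is not needed here. */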
+	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+
+	qlt_24xx_process_atio_queue(vha, 0);
+
+	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static void
+qlt_handle_abts_recv_work(struct work_struct *work)
+{
+	struct qla_tgt_sess_op *op = container_of(work,
+		struct qla_tgt_sess_op, work);
+	scsi_qla_host_t *vha = op->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+
+	if (qla2x00_reset_active(vha) ||
+	    (op->chip_reset != ha->base_qpair->chip_reset))
+		return;
+
+	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+	qlt_24xx_process_atio_queue(vha, 0);
+	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(op);
+}
+
+void
+qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
+    response_t *pkt)
+{
+	struct qla_tgt_sess_op *op;
+
+	op = kzalloc(sizeof(*op), GFP_ATOMIC);
+
+	if (!op) {
+		/* Do not reach for the ATIO queue here; this is best-effort
+		 * error recovery at this point.
+		 */
+		qlt_response_pkt_all_vps(vha, rsp, pkt);
+		return;
+	}
+
+	memcpy(&op->atio, pkt, sizeof(*pkt));
+	op->vha = vha;
+	op->chip_reset = vha->hw->base_qpair->chip_reset;
+	op->rsp = rsp;
+	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
+	queue_work(qla_tgt_wq, &op->work);
+	return;
+}
+
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return 0;
+
+	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+	if (!ha->tgt.tgt_vp_map)
+		return -ENOMEM;
+
+	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+	    &ha->tgt.atio_dma, GFP_KERNEL);
+	if (!ha->tgt.atio_ring) {
+		kfree(ha->tgt.tgt_vp_map);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	if (ha->tgt.atio_ring) {
+		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+		    ha->tgt.atio_dma);
+	}
+	kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+	void *slot;
+	u32 key;
+	int rc;
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	key = vha->d_id.b24;
+
+	switch (cmd) {
+	case SET_VP_IDX:
+		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+		break;
+	case SET_AL_PA:
+		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+		if (!slot) {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
+			    "Save vha in host_map %p %06x\n", vha, key);
+			rc = btree_insert32(&vha->hw->tgt.host_map,
+				key, vha, GFP_ATOMIC);
+			if (rc)
+				ql_log(ql_log_info, vha, 0xd03e,
+				    "Unable to insert s_id into host_map: %06x\n",
+				    key);
+			return;
+		}
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+		    "replace existing vha in host_map %p %06x\n", vha, key);
+		btree_update32(&vha->hw->tgt.host_map, key, vha);
+		break;
+	case RESET_VP_IDX:
+		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+		break;
+	case RESET_AL_PA:
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+		   "clear vha in host_map %p %06x\n", vha, key);
+		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+		if (slot)
+			btree_remove32(&vha->hw->tgt.host_map, key);
+		vha->d_id.b24 = 0;
+		break;
+	}
+}
+
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+	unsigned long flags;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!vha->d_id.b24) {
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		vha->d_id = id;
+		qlt_update_vp_map(vha, SET_AL_PA);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+	} else if (vha->d_id.b24 != id.b24) {
+		spin_lock_irqsave(&ha->vport_slock, flags);
+		qlt_update_vp_map(vha, RESET_AL_PA);
+		vha->d_id = id;
+		qlt_update_vp_map(vha, SET_AL_PA);
+		spin_unlock_irqrestore(&ha->vport_slock, flags);
+	}
+}
+
+static int __init qlt_parse_ini_mode(void)
+{
+	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
+		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
+	else
+		return false;
+
+	return true;
+}
+
+int __init qlt_init(void)
+{
+	int ret;
+
+	if (!qlt_parse_ini_mode()) {
+		ql_log(ql_log_fatal, NULL, 0xe06b,
+		    "qlt_parse_ini_mode() failed\n");
+		return -EINVAL;
+	}
+
+	if (!QLA_TGT_MODE_ENABLED())
+		return 0;
+
+	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+	    sizeof(struct qla_tgt_mgmt_cmd),
+	    __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL);
+	if (!qla_tgt_mgmt_cmd_cachep) {
+		ql_log(ql_log_fatal, NULL, 0xd04b,
+		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+		return -ENOMEM;
+	}
+
+	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
+	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
+	    0, NULL);
+
+	if (!qla_tgt_plogi_cachep) {
+		ql_log(ql_log_fatal, NULL, 0xe06d,
+		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
+		ret = -ENOMEM;
+		goto out_mgmt_cmd_cachep;
+	}
+
+	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+	if (!qla_tgt_mgmt_cmd_mempool) {
+		ql_log(ql_log_fatal, NULL, 0xe06e,
+		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+		ret = -ENOMEM;
+		goto out_plogi_cachep;
+	}
+
+	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+	if (!qla_tgt_wq) {
+		ql_log(ql_log_fatal, NULL, 0xe06f,
+		    "alloc_workqueue for qla_tgt_wq failed\n");
+		ret = -ENOMEM;
+		goto out_cmd_mempool;
+	}
+	/*
+	 * Return 1 to signal that initiator-mode is being disabled
+	 */
+	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_plogi_cachep:
+	kmem_cache_destroy(qla_tgt_plogi_cachep);
+out_mgmt_cmd_cachep:
+	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+	return ret;
+}
+
+void qlt_exit(void)
+{
+	if (!QLA_TGT_MODE_ENABLED())
+		return;
+
+	destroy_workqueue(qla_tgt_wq);
+	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+	kmem_cache_destroy(qla_tgt_plogi_cachep);
+	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_target.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 0000000..511a31b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1107 @@
+/*
+ *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ *  Copyright (C) 2004 - 2005 Leonid Stoljar
+ *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ *  Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ *  Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ *  Additional file for the target driver support.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ */
+/*
+ * This is the global definitions file intended for inclusion from the
+ * target portion of the driver.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC	269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC   57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE	"exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED	"disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED	"enabled"
+#define QLA2XXX_INI_MODE_STR_DUAL		"dual"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE	0
+#define QLA2XXX_INI_MODE_DISABLED	1
+#define QLA2XXX_INI_MODE_ENABLED	2
+#define QLA2XXX_INI_MODE_DUAL	3
+
+#define QLA2XXX_COMMAND_COUNT_INIT	250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Status IOCBs) are for CTIOs
+ * vs. regular (non-target) info. This is checked in
+ * qla2x00_process_response_queue() to see if a handle coming back in a
+ * multi-complete should go to the target driver or be handled by qla2xxx
+ * itself.
+ */
+#define CTIO_COMPLETION_HANDLE_MARK	BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than DEFAULT_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK	BIT_30
+#define QLA_TGT_NULL_HANDLE	0
+
+#define QLA_TGT_HANDLE_MASK  0xF0000000
+#define QLA_QPID_HANDLE_MASK 0x00FF0000 /* qpair id mask */
+#define QLA_CMD_HANDLE_MASK  0x0000FFFF
+#define QLA_TGT_SKIP_HANDLE	(0xFFFFFFFF & ~QLA_TGT_HANDLE_MASK)
+
+#define QLA_QPID_HANDLE_SHIFT 16
+#define GET_QID(_h) ((_h & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT)
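+/* Handle layout: bits 31-28 type marks, 23-16 qpair id, 15-0 command index. */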
+
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0        0
+#define OF_SS_MODE_1        1
+#define OF_SS_MODE_2        2
+#define OF_SS_MODE_3        3
+
+#define OF_EXPL_CONF        BIT_5       /* Explicit Confirmation Requested */
+#define OF_DATA_IN          BIT_6       /* Data in to initiator */
+					/*  (data from target to initiator) */
+#define OF_DATA_OUT         BIT_7       /* Data out from initiator */
+					/*  (data from initiator to target) */
+#define OF_NO_DATA          (BIT_7 | BIT_6)
+#define OF_INC_RC           BIT_8       /* Increment command resource count */
+#define OF_FAST_POST        BIT_9       /* Enable mailbox fast posting. */
+#define OF_CONF_REQ         BIT_13      /* Confirmation Requested */
+#define OF_TERM_EXCH        BIT_14      /* Terminate exchange */
+#define OF_SSTS             BIT_15      /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32	3
+#define QLA_TGT_DATASEGS_PER_CONT32	7
+#define QLA_TGT_MAX_SG32(ql) \
+	(((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+		QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64	2
+#define QLA_TGT_DATASEGS_PER_CONT64	5
+#define QLA_TGT_MAX_SG64(ql) \
+	(((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+		QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX	1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX	5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+	(min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+		QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))			\
+			 ? le16_to_cpu((iocb)->u.isp2x.target.extended)	\
+			 : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E	  /* Notify acknowledge entry. */
+/*
+ * ISP queue -	notify acknowledge entry structure definition.
+ *		This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	union {
+		struct {
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint8_t	 target_id;
+			uint8_t	 reserved_1;
+			uint16_t flags;
+			uint16_t resp_code;
+			uint16_t status;
+			uint16_t task_flags;
+			uint16_t seq_id;
+			uint16_t srr_rx_id;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+			uint16_t srr_flags;
+			uint16_t srr_reject_code;
+			uint8_t  srr_reject_vendor_uniq;
+			uint8_t  srr_reject_code_expl;
+			uint8_t  reserved_2[24];
+		} isp2x;
+		struct {
+			uint32_t handle;
+			uint16_t nport_handle;
+			uint16_t reserved_1;
+			uint16_t flags;
+			uint16_t srr_rx_id;
+			uint16_t status;
+			uint8_t  status_subcode;
+			uint8_t  fw_handle;
+			uint32_t exchange_address;
+			uint32_t srr_rel_offs;
+			uint16_t srr_ui;
+			uint16_t srr_flags;
+			uint8_t  reserved_4[19];
+			uint8_t  vp_index;
+			uint8_t  srr_reject_vendor_uniq;
+			uint8_t  srr_reject_code_expl;
+			uint8_t  srr_reject_code;
+			uint8_t  reserved_5[5];
+		} isp24;
+	} u;
+	uint8_t  reserved[2];
+	uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_FLAGS_TERMINATE	BIT_3
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT	0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT	1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM	0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL		0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA	0x2a
+
+#define NOTIFY_ACK_SUCCESS      0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue -	Continue Target I/O (CTIO) entry for status mode 0 structure.
+ *		This structure is sent to the ISP 2xxx from target driver.
+ */
+struct ctio_to_2xxx {
+	uint8_t	 entry_type;		/* Entry type. */
+	uint8_t	 entry_count;		/* Entry count. */
+	uint8_t	 sys_define;		/* System defined. */
+	uint8_t	 entry_status;		/* Entry Status. */
+	uint32_t handle;		/* System defined handle */
+	target_id_t target;
+	uint16_t rx_id;
+	uint16_t flags;
+	uint16_t status;
+	uint16_t timeout;		/* 0 = 30 seconds, 0xFFFF = disable */
+	uint16_t dseg_count;		/* Data segment count. */
+	uint32_t relative_offset;
+	uint32_t residual;
+	uint16_t reserved_1[3];
+	uint16_t scsi_status;
+	uint32_t transfer_length;
+	uint32_t dseg_0_address;	/* Data segment 0 address. */
+	uint32_t dseg_0_length;		/* Data segment 0 length. */
+	uint32_t dseg_1_address;	/* Data segment 1 address. */
+	uint32_t dseg_1_length;		/* Data segment 1 length. */
+	uint32_t dseg_2_address;	/* Data segment 2 address. */
+	uint32_t dseg_2_length;		/* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID       0x07
+#define ATIO_CANT_PROV_CAP      0x16
+#define ATIO_CDB_VALID          0x3D
+
+#define ATIO_EXEC_READ          BIT_1
+#define ATIO_EXEC_WRITE         BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS			0x01
+#define CTIO_ABORTED			0x02
+#define CTIO_INVALID_RX_ID		0x08
+#define CTIO_TIMEOUT			0x0B
+#define CTIO_DIF_ERROR			0x0C     /* DIF error detected  */
+#define CTIO_LIP_RESET			0x0E
+#define CTIO_TARGET_RESET		0x17
+#define CTIO_PORT_UNAVAILABLE		0x28
+#define CTIO_PORT_LOGGED_OUT		0x29
+#define CTIO_PORT_CONF_CHANGED		0x2A
+#define CTIO_SRR_RECEIVED		0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE	0x17		/* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+	uint8_t  r_ctl;
+	uint8_t  d_id[3];
+	uint8_t  cs_ctl;
+	uint8_t  s_id[3];
+	uint8_t  type;
+	uint8_t  f_ctl[3];
+	uint8_t  seq_id;
+	uint8_t  df_ctl;
+	uint16_t seq_cnt;
+	__be16   ox_id;
+	uint16_t rx_id;
+	uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+	uint8_t  d_id[3];
+	uint8_t  r_ctl;
+	uint8_t  s_id[3];
+	uint8_t  cs_ctl;
+	uint8_t  f_ctl[3];
+	uint8_t  type;
+	uint16_t seq_cnt;
+	uint8_t  df_ctl;
+	uint8_t  seq_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP	BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP	BIT_22
+#define F_CTL_LAST_SEQ		BIT_20
+#define F_CTL_END_SEQ		BIT_19
+#define F_CTL_SEQ_INITIATIVE	BIT_16
+
+#define R_CTL_BASIC_LINK_SERV	0x80
+#define R_CTL_B_ACC		0x4
+#define R_CTL_B_RJT		0x5
+
+struct atio7_fcp_cmnd {
+	uint64_t lun;
+	uint8_t  cmnd_ref;
+	uint8_t  task_attr:3;
+	uint8_t  reserved:5;
+	uint8_t  task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA		6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET		5
+#define FCP_CMND_TASK_MGMT_LU_RESET		4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET	2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET	1
+	uint8_t  wrdata:1;
+	uint8_t  rddata:1;
+	uint8_t  add_cdb_len:6;
+	uint8_t  cdb[16];
+	/*
+	 * add_cdb is optional and can be absent from struct atio7_fcp_cmnd.
+	 * Its size is 4 only to make sizeof(struct atio7_fcp_cmnd) be as
+	 * expected by the BUILD_BUG_ON in qlt_init().
+	 */
+	uint8_t  add_cdb[4];
+	/* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue -	Accept Target I/O (ATIO) type entry IOCB structure.
+ *		This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+	union {
+		struct {
+			uint16_t entry_hdr;
+			uint8_t  sys_define;   /* System defined. */
+			uint8_t  entry_status; /* Entry Status.   */
+			uint32_t sys_define_2; /* System defined. */
+			target_id_t target;
+			uint16_t rx_id;
+			uint16_t flags;
+			uint16_t status;
+			uint8_t  command_ref;
+			uint8_t  task_codes;
+			uint8_t  task_flags;
+			uint8_t  execution_codes;
+			uint8_t  cdb[MAX_CMDSZ];
+			uint32_t data_length;
+			uint16_t lun;
+			uint8_t  initiator_port_name[WWN_SIZE]; /* on qla23xx */
+			uint16_t reserved_32[6];
+			uint16_t ox_id;
+		} isp2x;
+		struct {
+			uint16_t entry_hdr;
+			uint8_t  fcp_cmnd_len_low;
+			uint8_t  fcp_cmnd_len_high:4;
+			uint8_t  attr:4;
+			uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN	0xFFFFFFFF
+			struct fcp_hdr fcp_hdr;
+			struct atio7_fcp_cmnd fcp_cmnd;
+		} isp24;
+		struct {
+			uint8_t  entry_type;	/* Entry type. */
+			uint8_t  entry_count;	/* Entry count. */
+			__le16	 attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN  0x38
+			uint8_t  data[56];
+			uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD		/* Signature */
+		} raw;
+	} u;
+} __packed;
+
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+	if (atio->entry_type == ATIO_TYPE7 &&
+	    ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+	     FCP_CMD_LENGTH_MIN))
+		return 1;
+	else
+		return 0;
+}
+
+/* Adjust a corrupted ATIO so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+	atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+	atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
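+/*
+ * The big-endian FCP_DL (data length) field follows the additional CDB
+ * bytes; add_cdb_len counts 4-byte words.
+ */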
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+	int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+	return (be32_to_cpu(get_unaligned((uint32_t *)
+	    &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue -	Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ *		This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;		    /* System defined handle */
+	uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED	0xFFFF
+	uint16_t timeout;
+	uint16_t dseg_count;		    /* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  add_flags;
+	uint8_t  initiator_id[3];
+	uint8_t  reserved;
+	uint32_t exchange_addr;
+	union {
+		struct {
+			uint16_t reserved1;
+			__le16 flags;
+			uint32_t residual;
+			__le16 ox_id;
+			uint16_t scsi_status;
+			uint32_t relative_offset;
+			uint32_t reserved2;
+			uint32_t transfer_length;
+			uint32_t reserved3;
+			/* Data segment 0 address. */
+			uint32_t dseg_0_address[2];
+			/* Data segment 0 length. */
+			uint32_t dseg_0_length;
+		} status0;
+		struct {
+			uint16_t sense_length;
+			__le16 flags;
+			uint32_t residual;
+			__le16 ox_id;
+			uint16_t scsi_status;
+			uint16_t response_len;
+			uint16_t reserved;
+			uint8_t sense_data[24];
+		} status1;
+	} u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;		    /* System defined handle */
+	uint16_t status;
+	uint16_t timeout;
+	uint16_t dseg_count;		    /* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  reserved1[5];
+	uint32_t exchange_address;
+	uint16_t reserved2;
+	uint16_t flags;
+	uint32_t residual;
+	uint16_t ox_id;
+	uint16_t reserved3;
+	uint32_t relative_offset;
+	uint8_t  reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS		BIT_15
+#define CTIO7_FLAGS_TERMINATE		BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ		BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO	BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0	0
+#define CTIO7_FLAGS_STATUS_MODE_1	BIT_6
+#define CTIO7_FLAGS_STATUS_MODE_2	BIT_7
+#define CTIO7_FLAGS_EXPLICIT_CONFORM	BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF	BIT_4
+#define CTIO7_FLAGS_DSD_PTR		BIT_2
+#define CTIO7_FLAGS_DATA_IN		BIT_1 /* data to initiator */
+#define CTIO7_FLAGS_DATA_OUT		BIT_0 /* data from initiator */
+
+#define ELS_PLOGI			0x3
+#define ELS_FLOGI			0x4
+#define ELS_LOGO			0x5
+#define ELS_PRLI			0x20
+#define ELS_PRLO			0x21
+#define ELS_TPRLO			0x24
+#define ELS_PDISC			0x50
+#define ELS_ADISC			0x52
+
+/*
+ * CTIO Type CRC_2 IOCB
+ */
+struct ctio_crc2_to_fw {
+	uint8_t entry_type;		/* Entry type. */
+#define CTIO_CRC2 0x7A
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
+	__le16 timeout;		/* Command timeout. */
+
+	uint16_t dseg_count;		/* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  add_flags;		/* additional flags */
+#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
+
+	uint8_t  initiator_id[3];	/* initiator ID */
+	uint8_t  reserved1;
+	uint32_t exchange_addr;		/* rcv exchange address */
+	uint16_t reserved2;
+	__le16 flags;			/* refer to CTIO7 flags values */
+	uint32_t residual;
+	__le16 ox_id;
+	uint16_t scsi_status;
+	__le32 relative_offset;
+	uint32_t reserved5;
+	__le32 transfer_length;		/* total fc transfer length */
+	uint32_t reserved6;
+	__le32 crc_context_address[2];/* Data segment address. */
+	uint16_t crc_context_len;	/* Data segment length. */
+	uint16_t reserved_1;		/* MUST be set to 0. */
+} __packed;
+
+/* CTIO Type CRC_x Status IOCB */
+struct ctio_crc_from_fw {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint16_t status;
+	uint16_t timeout;		/* Command timeout. */
+	uint16_t dseg_count;		/* Data segment count. */
+	uint32_t reserved1;
+	uint16_t state_flags;
+#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
+
+	uint32_t exchange_address;	/* rcv exchange address */
+	uint16_t reserved2;
+	uint16_t flags;
+	uint32_t resid_xfer_length;
+	uint16_t ox_id;
+	uint8_t  reserved3[12];
+	uint16_t runt_guard;		/* reported runt blk guard */
+	uint8_t  actual_dif[8];
+	uint8_t  expected_dif[8];
+} __packed;
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX		0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX		0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue -	ABTS received IOCB entry structure definition for 24xx.
+ *		The ABTS BLS received from the wire is sent to the
+ *		target driver by the ISP 24xx.
+ *		The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint8_t  reserved_1[6];
+	uint16_t nport_handle;
+	uint8_t  reserved_2[2];
+	uint8_t  vp_index;
+	uint8_t  reserved_3:4;
+	uint8_t  sof_type:4;
+	uint32_t exchange_address;
+	struct fcp_hdr_le fcp_hdr_le;
+	uint8_t  reserved_4[16];
+	uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ		BIT_0
+
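+/* BA_ACC (Basic Accept) BLS payload, little-endian layout as built for the ISP. */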
+struct ba_acc_le {
+	uint16_t reserved;
+	uint8_t  seq_id_last;
+	uint8_t  seq_id_valid;
+#define SEQ_ID_VALID	0x80
+#define SEQ_ID_INVALID	0x00
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint16_t high_seq_cnt;
+	uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+	uint8_t vendor_uniq;
+	uint8_t reason_expl;
+	uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND	0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM	0x9
+	uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue -	ABTS Response IOCB entry structure definition for 24xx.
+ *		The ABTS response to the ABTS received is sent by the
+ *		target driver to the ISP 24xx.
+ *		The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;
+	uint16_t reserved_1;
+	uint16_t nport_handle;
+	uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG	BIT_0
+	uint8_t  vp_index;
+	uint8_t  reserved_3:4;
+	uint8_t  sof_type:4;
+	uint32_t exchange_address;
+	struct fcp_hdr_le fcp_hdr_le;
+	union {
+		struct ba_acc_le ba_acct;
+		struct ba_rjt_le ba_rjt;
+	} __packed payload;
+	uint32_t reserved_4;
+	uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue -	ABTS Response IOCB from ISP24xx Firmware entry structure.
+ *		The ABTS response with completion status to the ABTS response
+ *		(sent by the target driver to the ISP 24xx) is sent by the
+ *		ISP24xx firmware to the target driver.
+ *		The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;
+	uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS		0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR	0x31
+	uint16_t nport_handle;
+	uint16_t reserved_1;
+	uint8_t  reserved_2;
+	uint8_t  reserved_3:4;
+	uint8_t  sof_type:4;
+	uint32_t exchange_address;
+	struct fcp_hdr_le fcp_hdr_le;
+	uint8_t reserved_4[8];
+	uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM	0x1E
+	uint32_t error_subcode2;
+	uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct fc_port;
+struct qla_tgt_cmd;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+			unsigned char *, uint32_t, int, int, int);
+	void (*handle_data)(struct qla_tgt_cmd *);
+	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
+			uint32_t);
+	void (*free_cmd)(struct qla_tgt_cmd *);
+	void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+	void (*free_session)(struct fc_port *);
+
+	int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+					struct fc_port *);
+	void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool);
+	struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+						const uint16_t);
+	struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *,
+						const uint8_t *);
+	void (*clear_nacl_from_fcport_map)(struct fc_port *);
+	void (*put_sess)(struct fc_port *);
+	void (*shutdown_sess)(struct fc_port *);
+	int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+	int (*chk_dif_tags)(uint32_t tag);
+	void (*add_target)(struct scsi_qla_host *);
+	void (*remove_target)(struct scsi_qla_host *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT			10	/* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME	60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET          0x000E
+#define IMM_NTFY_LIP_LINK_REINIT    0x000F
+#define IMM_NTFY_IOCB_OVERFLOW      0x0016
+#define IMM_NTFY_ABORT_TASK         0x0020
+#define IMM_NTFY_PORT_LOGOUT        0x0029
+#define IMM_NTFY_PORT_CONFIG        0x002A
+#define IMM_NTFY_GLBL_TPRLO         0x002D
+#define IMM_NTFY_GLBL_LOGO          0x002E
+#define IMM_NTFY_RESOURCE           0x0034
+#define IMM_NTFY_MSG_RX             0x0036
+#define IMM_NTFY_SRR                0x0045
+#define IMM_NTFY_ELS                0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT    8
+
+#define QLA_TGT_CLEAR_ACA               0x40
+#define QLA_TGT_TARGET_RESET            0x20
+#define QLA_TGT_LUN_RESET               0x10
+#define QLA_TGT_CLEAR_TS                0x04
+#define QLA_TGT_ABORT_TS                0x02
+#define QLA_TGT_ABORT_ALL_SESS          0xFFFF
+#define QLA_TGT_ABORT_ALL               0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS         0xFFFD
+#define QLA_TGT_NEXUS_LOSS              0xFFFC
+#define QLA_TGT_ABTS			0xFFFB
+#define QLA_TGT_2G_ABORT_TASK		0xFFFA
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT        BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET  BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW		0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA		1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN		2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED		3 /* target done processing */
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE           0
+#define ATIO_HEAD_OF_QUEUE          1
+#define ATIO_ORDERED_QUEUE          2
+#define ATIO_ACA_QUEUE              4
+#define ATIO_UNTAGGED               5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define	FC_TM_SUCCESS               0
+#define	FC_TM_BAD_FCP_DATA          1
+#define	FC_TM_BAD_CMD               2
+#define	FC_TM_FCP_DATA_MISMATCH     3
+#define	FC_TM_REJECT                4
+#define FC_TM_FAILED                5
+
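+/*
+ * Split a 64-bit DMA address into the 32-bit halves expected by IOCB
+ * fields; the double 16-bit shift avoids an undefined shift-by-32 on
+ * 32-bit builds.
+ */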
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense)  ((sense != NULL) && \
+				(((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+	uint8_t port_name[WWN_SIZE];
+	uint16_t loop_id;
+	uint16_t reserved;
+};
+
+struct qla_qpair_hint {
+	struct list_head hint_elem;
+	struct qla_qpair *qpair;
+	u16 cpuid;
+	uint8_t cmd_cnt;
+};
+
+struct qla_tgt {
+	struct scsi_qla_host *vha;
+	struct qla_hw_data *ha;
+	struct btree_head64 lun_qpair_map;
+	struct qla_qpair_hint *qphints;
+	/*
+	 * To sync between IRQ handlers and qlt_target_release(). Needed,
+	 * because req_pkt() can drop/reacquire HW lock inside. Protected by
+	 * HW lock.
+	 */
+	int atio_irq_cmd_count;
+
+	int sg_tablesize;
+
+	/* Target's flags, serialized by pha->hardware_lock */
+	unsigned int link_reinit_iocb_pending:1;
+
+	/*
+	 * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+	 * OR hardware_lock for reading.
+	 */
+	int tgt_stop; /* the target mode driver is being stopped */
+	int tgt_stopped; /* the target mode driver has been stopped */
+
+	/* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+	int sess_count;
+
+	/* Protected by hardware_lock */
+	struct list_head del_sess_list;
+
+	spinlock_t sess_work_lock;
+	struct list_head sess_works_list;
+	struct work_struct sess_work;
+
+	struct imm_ntfy_from_isp link_reinit_iocb;
+	wait_queue_head_t waitQ;
+	int notify_ack_expected;
+	int abts_resp_expected;
+	int modify_lun_expected;
+	atomic_t tgt_global_resets_count;
+	struct list_head tgt_list_entry;
+};
+
+struct qla_tgt_sess_op {
+	struct scsi_qla_host *vha;
+	uint32_t chip_reset;
+	struct atio_from_isp atio;
+	struct work_struct work;
+	struct list_head cmd_list;
+	bool aborted;
+	struct rsp_que *rsp;
+};
+
+enum trace_flags {
+	TRC_NEW_CMD = BIT_0,
+	TRC_DO_WORK = BIT_1,
+	TRC_DO_WORK_ERR = BIT_2,
+	TRC_XFR_RDY = BIT_3,
+	TRC_XMIT_DATA = BIT_4,
+	TRC_XMIT_STATUS = BIT_5,
+	TRC_SRR_RSP = BIT_6,
+	TRC_SRR_XRDY = BIT_7,
+	TRC_SRR_TERM = BIT_8,
+	TRC_SRR_CTIO = BIT_9,
+	TRC_FLUSH = BIT_10,
+	TRC_CTIO_ERR = BIT_11,
+	TRC_CTIO_DONE = BIT_12,
+	TRC_CTIO_ABORTED = BIT_13,
+	TRC_CTIO_STRANGE = BIT_14,
+	TRC_CMD_DONE = BIT_15,
+	TRC_CMD_CHK_STOP = BIT_16,
+	TRC_CMD_FREE = BIT_17,
+	TRC_DATA_IN = BIT_18,
+	TRC_ABORT = BIT_19,
+	TRC_DIF_ERR = BIT_20,
+};
+
+struct qla_tgt_cmd {
+	/*
+	 * Do not move cmd_type field. It needs to line up with srb->cmd_type.
+	 */
+	uint8_t cmd_type;
+	uint8_t pad[7];
+	struct se_cmd se_cmd;
+	struct fc_port *sess;
+	struct qla_qpair *qpair;
+	uint32_t reset_count;
+	int state;
+	struct work_struct work;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+	spinlock_t cmd_lock;
+	/* to save extra sess dereferences */
+	unsigned int conf_compl_supported:1;
+	unsigned int sg_mapped:1;
+	unsigned int free_sg:1;
+	unsigned int write_data_transferred:1;
+	unsigned int q_full:1;
+	unsigned int term_exchg:1;
+	unsigned int cmd_sent_to_fw:1;
+	unsigned int cmd_in_wq:1;
+	unsigned int aborted:1;
+	unsigned int data_work:1;
+	unsigned int data_work_free:1;
+
+	struct scatterlist *sg;	/* cmd data buffer SG vector */
+	int sg_cnt;		/* SG segments count */
+	int bufflen;		/* cmd buffer length */
+	int offset;
+	u64 unpacked_lun;
+	enum dma_data_direction dma_data_direction;
+
+	uint16_t vp_idx;
+	uint16_t loop_id;	/* to save extra sess dereferences */
+	struct qla_tgt *tgt;	/* to save extra sess dereferences */
+	struct scsi_qla_host *vha;
+	struct list_head cmd_list;
+
+	struct atio_from_isp atio;
+
+	uint8_t ctx_dsd_alloced;
+
+	/* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+	int8_t dif_err_code;
+	struct scatterlist *prot_sg;
+	uint32_t prot_sg_cnt;
+	uint32_t blk_sz, num_blks;
+	uint8_t scsi_status, sense_key, asc, ascq;
+
+	struct crc_context *ctx;
+	uint8_t		*cdb;
+	uint64_t	lba;
+	uint16_t	a_guard, e_guard, a_app_tag, e_app_tag;
+	uint32_t	a_ref_tag, e_ref_tag;
+
+	uint64_t jiffies_at_alloc;
+	uint64_t jiffies_at_free;
+
+	enum trace_flags trc_flags;
+};
+
+struct qla_tgt_sess_work_param {
+	struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT	1
+#define QLA_TGT_SESS_WORK_TM	2
+	int type;
+
+	union {
+		struct abts_recv_from_24xx abts;
+		struct imm_ntfy_from_isp tm_iocb;
+		struct atio_from_isp tm_iocb2;
+	};
+};
+
+struct qla_tgt_mgmt_cmd {
+	uint16_t tmr_func;
+	uint8_t fc_tm_rsp;
+	struct fc_port *sess;
+	struct qla_qpair *qpair;
+	struct scsi_qla_host *vha;
+	struct se_cmd se_cmd;
+	struct work_struct free_work;
+	unsigned int flags;
+	uint32_t reset_count;
+#define QLA24XX_MGMT_SEND_NACK	1
+	union {
+		struct atio_from_isp atio;
+		struct imm_ntfy_from_isp imm_ntfy;
+		struct abts_recv_from_24xx abts;
+	} __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+	struct qla_tgt_cmd *cmd;
+	struct qla_tgt *tgt;
+	void *pkt;
+	struct scatterlist *sg;	/* cmd data buffer SG vector */
+	unsigned char *sense_buffer;
+	int seg_cnt;
+	int req_cnt;
+	uint16_t rq_result;
+	int sense_buffer_len;
+	int residual;
+	int add_status_pkt;
+	/* dif */
+	struct scatterlist *prot_sg;
+	uint16_t prot_seg_cnt;
+	uint16_t tot_dsds;
+};
+
+/* Check for Switch reserved address */
+#define IS_SW_RESV_ADDR(_s_id) \
+	((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+
+#define QLA_TGT_XMIT_DATA		1
+#define QLA_TGT_XMIT_STATUS		2
+#define QLA_TGT_XMIT_ALL		(QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+
+extern struct qla_tgt_data qla_target;
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(void *, u64, u64, u64,
+			int (*callback)(struct scsi_qla_host *, void *, u64, u64));
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct fc_port *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+extern int ql2x_ini_mode;
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+	return ha->host->active_mode == MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+	return ha->host->active_mode == MODE_INITIATOR;
+}
+
+static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha)
+{
+	return (ha->host->active_mode == MODE_DUAL);
+}
+
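+/* Pack the 3-byte FC source ID (domain/area/al_pa) into a 24-bit key. */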
+static inline uint32_t sid_to_key(const uint8_t *s_id)
+{
+	uint32_t key;
+
+	key = (((unsigned long)s_id[0] << 16) |
+	       ((unsigned long)s_id[1] << 8) |
+	       (unsigned long)s_id[2]);
+	return key;
+}
+
+static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
+{
+	memset(p, 0, sizeof(*p));
+	p->b.domain = s_id[0];
+	p->b.area = s_id[1];
+	p->b.al_pa = s_id[2];
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
+ */
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
+	response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern int qlt_abort_cmd(struct qla_tgt_cmd *);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+	struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+	struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+	struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+	struct nvram_81xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+	struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+	struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern int qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+extern int qlt_free_qfull_cmds(struct qla_qpair *);
+extern void qlt_logo_completion_handler(fc_port_t *, int);
+extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+
+void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
+    uint8_t, uint8_t, uint8_t);
+extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
+    struct qla_tgt_cmd *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_tmpl.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 0000000..733e8dc
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,1004 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include "qla_tmpl.h"
+
+/* note default template is in big endian */
+static const uint32_t ql27xx_fwdt_default_template[] = {
+	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
+	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x04010000, 0x14000000, 0x00000000,
+	0x02000000, 0x44000000, 0x09010000, 0x10000000,
+	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
+	0x00000000, 0x02000000, 0x00600000, 0x00000000,
+	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
+	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
+	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
+	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
+	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
+	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
+	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
+	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
+	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
+	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
+	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
+	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
+	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
+	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
+	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
+	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
+	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
+	0x00010000, 0x18000000, 0x00000000, 0x02000000,
+	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
+	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
+	0x00000000, 0x02000000, 0x01000000, 0x00000200,
+	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
+	0x02000000, 0x02000000, 0x00001000, 0x00000000,
+	0x07010000, 0x18000000, 0x00000000, 0x02000000,
+	0x00000000, 0x01000000, 0x07010000, 0x18000000,
+	0x00000000, 0x02000000, 0x00000000, 0x02000000,
+	0x07010000, 0x18000000, 0x00000000, 0x02000000,
+	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
+	0x00000000, 0x02000000, 0x00000000, 0xff000000,
+	0x10000000, 0x00000000, 0x00000080,
+};
+
+static inline void __iomem *
+qla27xx_isp_reg(struct scsi_qla_host *vha)
+{
+	return &vha->hw->iobase->isp24;
+}
+
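+/*
+ * The insert/read helpers below run in two passes: called with a NULL
+ * buffer they only accumulate *len (dump sizing); with a real buffer
+ * they also store the captured data.
+ */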
+static inline void
+qla27xx_insert16(uint16_t value, void *buf, ulong *len)
+{
+	if (buf) {
+		buf += *len;
+		*(__le16 *)buf = cpu_to_le16(value);
+	}
+	*len += sizeof(value);
+}
+
+static inline void
+qla27xx_insert32(uint32_t value, void *buf, ulong *len)
+{
+	if (buf) {
+		buf += *len;
+		*(__le32 *)buf = cpu_to_le32(value);
+	}
+	*len += sizeof(value);
+}
+
+static inline void
+qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
+{
+	if (buf && mem && size) {
+		buf += *len;
+		memcpy(buf, mem, size);
+	}
+	*len += size;
+}
+
+static inline void
+qla27xx_read8(void __iomem *window, void *buf, ulong *len)
+{
+	uint8_t value = ~0;
+
+	if (buf) {
+		value = RD_REG_BYTE(window);
+	}
+	qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read16(void __iomem *window, void *buf, ulong *len)
+{
+	uint16_t value = ~0;
+
+	if (buf) {
+		value = RD_REG_WORD(window);
+	}
+	qla27xx_insert32(value, buf, len);
+}
+
+static inline void
+qla27xx_read32(void __iomem *window, void *buf, ulong *len)
+{
+	uint32_t value = ~0;
+
+	if (buf) {
+		value = RD_REG_DWORD(window);
+	}
+	qla27xx_insert32(value, buf, len);
+}
+
+static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
+{
+	return
+	    (width == 1) ? qla27xx_read8 :
+	    (width == 2) ? qla27xx_read16 :
+			   qla27xx_read32;
+}
+
+static inline void
+qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
+	uint offset, void *buf, ulong *len)
+{
+	void __iomem *window = (void __iomem *)reg + offset;
+
+	qla27xx_read32(window, buf, len);
+}
+
+static inline void
+qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
+	uint offset, uint32_t data, void *buf)
+{
+	__iomem void *window = (void __iomem *)reg + offset;
+
+	if (buf) {
+		WRT_REG_DWORD(window, data);
+	}
+}
+
+static inline void
+qla27xx_read_window(__iomem struct device_reg_24xx *reg,
+	uint32_t addr, uint offset, uint count, uint width, void *buf,
+	ulong *len)
+{
+	void __iomem *window = (void __iomem *)reg + offset;
+	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
+
+	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
+	while (count--) {
+		qla27xx_insert32(addr, buf, len);
+		readn(window, buf, len);
+		window += width;
+		addr++;
+	}
+}
+
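+/* Flag an entry so that dump post-processing tools ignore it. */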
+static inline void
+qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
+{
+	if (buf)
+		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
+}
+
+static int
+qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd100,
+	    "%s: nop [%lx]\n", __func__, *len);
+	qla27xx_skip_entry(ent, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
+	    "%s: end [%lx]\n", __func__, *len);
+	qla27xx_skip_entry(ent, buf);
+
+	/* terminate */
+	return true;
+}
+
+static int
+qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd200,
+	    "%s: rdio t1 [%lx]\n", __func__, *len);
+	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
+	    ent->t256.reg_count, ent->t256.reg_width, buf, len);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd201,
+	    "%s: wrio t1 [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
+	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd202,
+	    "%s: rdio t2 [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
+	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
+	    ent->t258.reg_count, ent->t258.reg_width, buf, len);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd203,
+	    "%s: wrio t2 [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
+	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
+	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd204,
+	    "%s: rdpci [%lx]\n", __func__, *len);
+	qla27xx_insert32(ent->t260.pci_offset, buf, len);
+	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd205,
+	    "%s: wrpci [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong dwords;
+	ulong start;
+	ulong end;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd206,
+	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
+	start = ent->t262.start_addr;
+	end = ent->t262.end_addr;
+
+	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
+		;
+	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
+		end = vha->hw->fw_memory_size;
+		if (buf)
+			ent->t262.end_addr = end;
+	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
+		start = vha->hw->fw_shared_ram_start;
+		end = vha->hw->fw_shared_ram_end;
+		if (buf) {
+			ent->t262.start_addr = start;
+			ent->t262.end_addr = end;
+		}
+	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+		start = vha->hw->fw_ddr_ram_start;
+		end = vha->hw->fw_ddr_ram_end;
+		if (buf) {
+			ent->t262.start_addr = start;
+			ent->t262.end_addr = end;
+		}
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd022,
+		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	if (end < start || start == 0 || end == 0) {
+		ql_dbg(ql_dbg_misc, vha, 0xd023,
+		    "%s: unusable range (start=%x end=%x)\n", __func__,
+		    ent->t262.start_addr, ent->t262.end_addr);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	dwords = end - start + 1;
+	if (buf) {
+		buf += *len;
+		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+	}
+	*len += dwords * sizeof(uint32_t);
+done:
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	uint count = 0;
+	uint i;
+	uint length;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd207,
+	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
+	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+		for (i = 0; i < vha->hw->max_req_queues; i++) {
+			struct req_que *req = vha->hw->req_q_map[i];
+
+			if (req || !buf) {
+				length = req ?
+				    req->length : REQUEST_ENTRY_CNT_24XX;
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(length, buf, len);
+				qla27xx_insertbuf(req ? req->ring : NULL,
+				    length * sizeof(*req->ring), buf, len);
+				count++;
+			}
+		}
+	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+			if (rsp || !buf) {
+				length = rsp ?
+				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(length, buf, len);
+				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
+				    length * sizeof(*rsp->ring), buf, len);
+				count++;
+			}
+		}
+	} else if (QLA_TGT_MODE_ENABLED() &&
+	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+		struct qla_hw_data *ha = vha->hw;
+		struct atio *atr = ha->tgt.atio_ring;
+
+		if (atr || !buf) {
+			length = ha->tgt.atio_q_length;
+			qla27xx_insert16(0, buf, len);
+			qla27xx_insert16(length, buf, len);
+			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+			count++;
+		}
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd026,
+		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
+		qla27xx_skip_entry(ent, buf);
+	}
+
+	if (buf) {
+		if (count)
+			ent->t263.num_queues = count;
+		else
+			qla27xx_skip_entry(ent, buf);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd208,
+	    "%s: getfce [%lx]\n", __func__, *len);
+	if (vha->hw->fce) {
+		if (buf) {
+			ent->t264.fce_trace_size = FCE_SIZE;
+			ent->t264.write_pointer = vha->hw->fce_wr;
+			ent->t264.base_pointer = vha->hw->fce_dma;
+			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
+			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
+			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
+			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
+			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
+			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
+		}
+		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd027,
+		    "%s: missing fce\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd209,
+	    "%s: pause risc [%lx]\n", __func__, *len);
+	if (buf)
+		qla24xx_pause_risc(reg, vha->hw);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd20a,
+	    "%s: reset risc [%lx]\n", __func__, *len);
+	if (buf)
+		qla24xx_soft_reset(vha->hw);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd20b,
+	    "%s: dis intr [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd20c,
+	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
+	if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+		if (vha->hw->eft) {
+			if (buf) {
+				ent->t268.buf_size = EFT_SIZE;
+				ent->t268.start_addr = vha->hw->eft_dma;
+			}
+			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
+		} else {
+			ql_dbg(ql_dbg_misc, vha, 0xd028,
+			    "%s: missing eft\n", __func__);
+			qla27xx_skip_entry(ent, buf);
+		}
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd02b,
+		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
+		qla27xx_skip_entry(ent, buf);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd20d,
+	    "%s: scratch [%lx]\n", __func__, *len);
+	qla27xx_insert32(0xaaaaaaaa, buf, len);
+	qla27xx_insert32(0xbbbbbbbb, buf, len);
+	qla27xx_insert32(0xcccccccc, buf, len);
+	qla27xx_insert32(0xdddddddd, buf, len);
+	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
+	if (buf)
+		ent->t269.scratch_size = 5 * sizeof(uint32_t);
+
+	return false;
+}
+
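+/*
+ * Remote register read: the address (with BIT 31 set) is written through
+ * the 0x40 IOBASE window at offset 0xc0, and the data is read back at 0xc4.
+ */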
+static int
+qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	ulong dwords = ent->t270.count;
+	ulong addr = ent->t270.addr;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd20e,
+	    "%s: rdremreg [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+	while (dwords--) {
+		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
+		qla27xx_insert32(addr, buf, len);
+		qla27xx_read_reg(reg, 0xc4, buf, len);
+		addr += sizeof(uint32_t);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
+	ulong addr = ent->t271.addr;
+	ulong data = ent->t271.data;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd20f,
+	    "%s: wrremreg [%lx]\n", __func__, *len);
+	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
+	qla27xx_write_reg(reg, 0xc4, data, buf);
+	qla27xx_write_reg(reg, 0xc0, addr, buf);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong dwords = ent->t272.count;
+	ulong start = ent->t272.addr;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd210,
+	    "%s: rdremram [%lx]\n", __func__, *len);
+	if (buf) {
+		ql_dbg(ql_dbg_misc, vha, 0xd02c,
+		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
+		buf += *len;
+		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
+	}
+	*len += dwords * sizeof(uint32_t);
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong dwords = ent->t273.count;
+	ulong addr = ent->t273.addr;
+	uint32_t value;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd211,
+	    "%s: pcicfg [%lx]\n", __func__, *len);
+	while (dwords--) {
+		value = ~0;
+		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
+			ql_dbg(ql_dbg_misc, vha, 0xd02d,
+			    "%s: failed pcicfg read at %lx\n", __func__, addr);
+		qla27xx_insert32(addr, buf, len);
+		qla27xx_insert32(value, buf, len);
+		addr += sizeof(uint32_t);
+	}
+
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	uint count = 0;
+	uint i;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd212,
+	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
+	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+		for (i = 0; i < vha->hw->max_req_queues; i++) {
+			struct req_que *req = vha->hw->req_q_map[i];
+
+			if (req || !buf) {
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(1, buf, len);
+				qla27xx_insert32(req && req->out_ptr ?
+				    *req->out_ptr : 0, buf, len);
+				count++;
+			}
+		}
+	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+			if (rsp || !buf) {
+				qla27xx_insert16(i, buf, len);
+				qla27xx_insert16(1, buf, len);
+				qla27xx_insert32(rsp && rsp->in_ptr ?
+				    *rsp->in_ptr : 0, buf, len);
+				count++;
+			}
+		}
+	} else if (QLA_TGT_MODE_ENABLED() &&
+	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+		struct qla_hw_data *ha = vha->hw;
+		struct atio *atr = ha->tgt.atio_ring_ptr;
+
+		if (atr || !buf) {
+			qla27xx_insert16(0, buf, len);
+			qla27xx_insert16(1, buf, len);
+			qla27xx_insert32(ha->tgt.atio_q_in ?
+			    readl(ha->tgt.atio_q_in) : 0, buf, len);
+			count++;
+		}
+	} else {
+		ql_dbg(ql_dbg_misc, vha, 0xd02f,
+		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
+		qla27xx_skip_entry(ent, buf);
+	}
+
+	if (buf) {
+		if (count)
+			ent->t274.num_queues = count;
+		else
+			qla27xx_skip_entry(ent, buf);
+	}
+
+	return false;
+}
+
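+/*
+ * Write-buffer entry: the payload is inline after the entry header, so
+ * validate its length against entry_size before copying it out.
+ */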
+static int
+qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong offset = offsetof(typeof(*ent), t275.buffer);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd213,
+	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
+	if (!ent->t275.length) {
+		ql_dbg(ql_dbg_misc, vha, 0xd020,
+		    "%s: buffer zero length\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+	if (offset + ent->t275.length > ent->hdr.entry_size) {
+		ql_dbg(ql_dbg_misc, vha, 0xd030,
+		    "%s: buffer overflow\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+done:
+	return false;
+}
+
+static int
+qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
+	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
+	qla27xx_skip_entry(ent, buf);
+
+	return false;
+}
+
+struct qla27xx_fwdt_entry_call {
+	uint type;
+	int (*call)(
+	    struct scsi_qla_host *,
+	    struct qla27xx_fwdt_entry *,
+	    void *,
+	    ulong *);
+};
+
+static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
+	{ ENTRY_TYPE_NOP		, qla27xx_fwdt_entry_t0    } ,
+	{ ENTRY_TYPE_TMP_END		, qla27xx_fwdt_entry_t255  } ,
+	{ ENTRY_TYPE_RD_IOB_T1		, qla27xx_fwdt_entry_t256  } ,
+	{ ENTRY_TYPE_WR_IOB_T1		, qla27xx_fwdt_entry_t257  } ,
+	{ ENTRY_TYPE_RD_IOB_T2		, qla27xx_fwdt_entry_t258  } ,
+	{ ENTRY_TYPE_WR_IOB_T2		, qla27xx_fwdt_entry_t259  } ,
+	{ ENTRY_TYPE_RD_PCI		, qla27xx_fwdt_entry_t260  } ,
+	{ ENTRY_TYPE_WR_PCI		, qla27xx_fwdt_entry_t261  } ,
+	{ ENTRY_TYPE_RD_RAM		, qla27xx_fwdt_entry_t262  } ,
+	{ ENTRY_TYPE_GET_QUEUE		, qla27xx_fwdt_entry_t263  } ,
+	{ ENTRY_TYPE_GET_FCE		, qla27xx_fwdt_entry_t264  } ,
+	{ ENTRY_TYPE_PSE_RISC		, qla27xx_fwdt_entry_t265  } ,
+	{ ENTRY_TYPE_RST_RISC		, qla27xx_fwdt_entry_t266  } ,
+	{ ENTRY_TYPE_DIS_INTR		, qla27xx_fwdt_entry_t267  } ,
+	{ ENTRY_TYPE_GET_HBUF		, qla27xx_fwdt_entry_t268  } ,
+	{ ENTRY_TYPE_SCRATCH		, qla27xx_fwdt_entry_t269  } ,
+	{ ENTRY_TYPE_RDREMREG		, qla27xx_fwdt_entry_t270  } ,
+	{ ENTRY_TYPE_WRREMREG		, qla27xx_fwdt_entry_t271  } ,
+	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } ,
+	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } ,
+	{ ENTRY_TYPE_GET_SHADOW		, qla27xx_fwdt_entry_t274  } ,
+	{ ENTRY_TYPE_WRITE_BUF		, qla27xx_fwdt_entry_t275  } ,
+	{ -1				, qla27xx_fwdt_entry_other }
+};
+
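+/*
+ * The call list above is sorted by ascending entry type and terminated
+ * by a catch-all, so the linear scan always finds a handler.
+ */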
+static inline int (*qla27xx_find_entry(uint type))
+	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
+{
+	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
+
+	while (list->type < type)
+		list++;
+
+	if (list->type == type)
+		return list->call;
+	return qla27xx_fwdt_entry_other;
+}
+
+static inline void *
+qla27xx_next_entry(void *p)
+{
+	struct qla27xx_fwdt_entry *ent = p;
+
+	return p + ent->hdr.entry_size;
+}
+
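+/*
+ * Walk every template entry and dispatch its handler; with buf == NULL
+ * this only computes the dump length, otherwise it captures the dump.
+ */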
+static void
+qla27xx_walk_template(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
+{
+	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
+	ulong count = tmp->entry_count;
+
+	ql_dbg(ql_dbg_misc, vha, 0xd01a,
+	    "%s: entry count %lx\n", __func__, count);
+	while (count--) {
+		if (buf && *len >= vha->hw->fw_dump_len)
+			break;
+		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
+			break;
+		ent = qla27xx_next_entry(ent);
+	}
+
+	if (count)
+		ql_dbg(ql_dbg_misc, vha, 0xd018,
+		    "%s: entry residual count (%lx)\n", __func__, count);
+
+	if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+		ql_dbg(ql_dbg_misc, vha, 0xd019,
+		    "%s: missing end entry (%lx)\n", __func__, count);
+
+	if (buf && *len != vha->hw->fw_dump_len)
+		ql_dbg(ql_dbg_misc, vha, 0xd01b,
+		    "%s: length=%#lx residual=%+ld\n",
+		    __func__, *len, vha->hw->fw_dump_len - *len);
+
+	if (buf) {
+		ql_log(ql_log_warn, vha, 0xd015,
+		    "Firmware dump saved to temp buffer (%lu/%p)\n",
+		    vha->host_no, vha->hw->fw_dump);
+		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+	}
+}
+
+static void
+qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
+{
+	tmp->capture_timestamp = jiffies;
+}
+
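+/*
+ * Record the driver version parsed from qla2x00_version_str, plus a
+ * fixed marker dword, in the template header.
+ */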
+static void
+qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
+{
+	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
+
+	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+	    v+0, v+1, v+2, v+3, v+4, v+5);
+
+	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
+	tmp->driver_info[1] = v[5] << 8 | v[4];
+	tmp->driver_info[2] = 0x12345678;
+}
+
+static void
+qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
+	struct scsi_qla_host *vha)
+{
+	tmp->firmware_version[0] = vha->hw->fw_major_version;
+	tmp->firmware_version[1] = vha->hw->fw_minor_version;
+	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+	tmp->firmware_version[3] =
+	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
+	tmp->firmware_version[4] =
+	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
+}
+
+static void
+ql27xx_edit_template(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_template *tmp)
+{
+	qla27xx_time_stamp(tmp);
+	qla27xx_driver_info(tmp);
+	qla27xx_firmware_info(tmp, vha);
+}
+
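+/*
+ * One's-complement style checksum: sum all dwords into 64 bits, fold
+ * the carry back into the low word, and invert; a template with a valid
+ * embedded checksum sums to zero.
+ */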
+static inline uint32_t
+qla27xx_template_checksum(void *p, ulong size)
+{
+	uint32_t *buf = p;
+	uint64_t sum = 0;
+
+	size /= sizeof(*buf);
+
+	while (size--)
+		sum += *buf++;
+
+	sum = (sum & 0xffffffff) + (sum >> 32);
+
+	return ~sum;
+}
+
+static inline int
+qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
+{
+	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
+}
+
+static inline int
+qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
+{
+	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
+}
+
+static void
+qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+{
+	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+	ulong len;
+
+	if (qla27xx_fwdt_template_valid(tmp)) {
+		len = tmp->template_size;
+		tmp = memcpy(vha->hw->fw_dump, tmp, len);
+		ql27xx_edit_template(vha, tmp);
+		qla27xx_walk_template(vha, tmp, tmp, &len);
+		vha->hw->fw_dump_len = len;
+		vha->hw->fw_dumped = 1;
+	}
+}
+
+ulong
+qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+{
+	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
+	ulong len = 0;
+
+	if (qla27xx_fwdt_template_valid(tmp)) {
+		len = tmp->template_size;
+		qla27xx_walk_template(vha, tmp, NULL, &len);
+	}
+
+	return len;
+}
+
+ulong
+qla27xx_fwdt_template_size(void *p)
+{
+	struct qla27xx_fwdt_template *tmp = p;
+
+	return tmp->template_size;
+}
+
+ulong
+qla27xx_fwdt_template_default_size(void)
+{
+	return sizeof(ql27xx_fwdt_default_template);
+}
+
+const void *
+qla27xx_fwdt_template_default(void)
+{
+	return ql27xx_fwdt_default_template;
+}
+
+int
+qla27xx_fwdt_template_valid(void *p)
+{
+	struct qla27xx_fwdt_template *tmp = p;
+
+	if (!qla27xx_verify_template_header(tmp)) {
+		ql_log(ql_log_warn, NULL, 0xd01c,
+		    "%s: template type %x\n", __func__, tmp->template_type);
+		return false;
+	}
+
+	if (!qla27xx_verify_template_checksum(tmp)) {
+		ql_log(ql_log_warn, NULL, 0xd01d,
+		    "%s: failed template checksum\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+{
+	ulong flags = 0;
+
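+	/* Conditional locking confuses sparse, so hide it from __CHECKER__. */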
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+#endif
+
+	if (!vha->hw->fw_dump)
+		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
+	else if (!vha->hw->fw_dump_template)
+		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
+	else if (vha->hw->fw_dumped)
+		ql_log(ql_log_warn, vha, 0xd300,
+		    "Firmware has been previously dumped (%p),"
+		    " -- ignoring request\n", vha->hw->fw_dump);
+	else
+		qla27xx_execute_fwdt_template(vha);
+
+#ifndef __CHECKER__
+	if (!hardware_locked)
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_tmpl.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 0000000..141c1c5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,224 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_DMP27_H__
+#define	__QLA_DMP27_H__
+
+#define IOBASE_ADDR	offsetof(struct device_reg_24xx, iobase_addr)
+
+struct __packed qla27xx_fwdt_template {
+	uint32_t template_type;
+	uint32_t entry_offset;
+	uint32_t template_size;
+	uint32_t reserved_1;
+
+	uint32_t entry_count;
+	uint32_t template_version;
+	uint32_t capture_timestamp;
+	uint32_t template_checksum;
+
+	uint32_t reserved_2;
+	uint32_t driver_info[3];
+
+	uint32_t saved_state[16];
+
+	uint32_t reserved_3[8];
+	uint32_t firmware_version[5];
+};
+
+#define TEMPLATE_TYPE_FWDUMP		99
+
+#define ENTRY_TYPE_NOP			0
+#define ENTRY_TYPE_TMP_END		255
+#define ENTRY_TYPE_RD_IOB_T1		256
+#define ENTRY_TYPE_WR_IOB_T1		257
+#define ENTRY_TYPE_RD_IOB_T2		258
+#define ENTRY_TYPE_WR_IOB_T2		259
+#define ENTRY_TYPE_RD_PCI		260
+#define ENTRY_TYPE_WR_PCI		261
+#define ENTRY_TYPE_RD_RAM		262
+#define ENTRY_TYPE_GET_QUEUE		263
+#define ENTRY_TYPE_GET_FCE		264
+#define ENTRY_TYPE_PSE_RISC		265
+#define ENTRY_TYPE_RST_RISC		266
+#define ENTRY_TYPE_DIS_INTR		267
+#define ENTRY_TYPE_GET_HBUF		268
+#define ENTRY_TYPE_SCRATCH		269
+#define ENTRY_TYPE_RDREMREG		270
+#define ENTRY_TYPE_WRREMREG		271
+#define ENTRY_TYPE_RDREMRAM		272
+#define ENTRY_TYPE_PCICFG		273
+#define ENTRY_TYPE_GET_SHADOW		274
+#define ENTRY_TYPE_WRITE_BUF		275
+
+#define CAPTURE_FLAG_PHYS_ONLY		BIT_0
+#define CAPTURE_FLAG_PHYS_VIRT		BIT_1
+
+#define DRIVER_FLAG_SKIP_ENTRY		BIT_7
+
+struct __packed qla27xx_fwdt_entry {
+	struct __packed {
+		uint32_t entry_type;
+		uint32_t entry_size;
+		uint32_t reserved_1;
+
+		uint8_t  capture_flags;
+		uint8_t  reserved_2[2];
+		uint8_t  driver_flags;
+	} hdr;
+	union __packed {
+		struct __packed {
+		} t0;
+
+		struct __packed {
+		} t255;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint8_t  reg_width;
+			uint16_t reg_count;
+			uint8_t  pci_offset;
+		} t256;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint32_t write_data;
+			uint8_t  pci_offset;
+			uint8_t  reserved[3];
+		} t257;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint8_t  reg_width;
+			uint16_t reg_count;
+			uint8_t  pci_offset;
+			uint8_t  banksel_offset;
+			uint8_t  reserved[3];
+			uint32_t bank;
+		} t258;
+
+		struct __packed {
+			uint32_t base_addr;
+			uint32_t write_data;
+			uint8_t  reserved[2];
+			uint8_t  pci_offset;
+			uint8_t  banksel_offset;
+			uint32_t bank;
+		} t259;
+
+		struct __packed {
+			uint8_t pci_offset;
+			uint8_t reserved[3];
+		} t260;
+
+		struct __packed {
+			uint8_t pci_offset;
+			uint8_t reserved[3];
+			uint32_t write_data;
+		} t261;
+
+		struct __packed {
+			uint8_t  ram_area;
+			uint8_t  reserved[3];
+			uint32_t start_addr;
+			uint32_t end_addr;
+		} t262;
+
+		struct __packed {
+			uint32_t num_queues;
+			uint8_t  queue_type;
+			uint8_t  reserved[3];
+		} t263;
+
+		struct __packed {
+			uint32_t fce_trace_size;
+			uint64_t write_pointer;
+			uint64_t base_pointer;
+			uint32_t fce_enable_mb0;
+			uint32_t fce_enable_mb2;
+			uint32_t fce_enable_mb3;
+			uint32_t fce_enable_mb4;
+			uint32_t fce_enable_mb5;
+			uint32_t fce_enable_mb6;
+		} t264;
+
+		struct __packed {
+		} t265;
+
+		struct __packed {
+		} t266;
+
+		struct __packed {
+			uint8_t  pci_offset;
+			uint8_t  reserved[3];
+			uint32_t data;
+		} t267;
+
+		struct __packed {
+			uint8_t  buf_type;
+			uint8_t  reserved[3];
+			uint32_t buf_size;
+			uint64_t start_addr;
+		} t268;
+
+		struct __packed {
+			uint32_t scratch_size;
+		} t269;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t count;
+		} t270;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t data;
+		} t271;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t count;
+		} t272;
+
+		struct __packed {
+			uint32_t addr;
+			uint32_t count;
+		} t273;
+
+		struct __packed {
+			uint32_t num_queues;
+			uint8_t  queue_type;
+			uint8_t  reserved[3];
+		} t274;
+
+		struct __packed {
+			uint32_t length;
+			uint8_t  buffer[];
+		} t275;
+	};
+};
+
+#define T262_RAM_AREA_CRITICAL_RAM	1
+#define T262_RAM_AREA_EXTERNAL_RAM	2
+#define T262_RAM_AREA_SHARED_RAM	3
+#define T262_RAM_AREA_DDR_RAM		4
+
+#define T263_QUEUE_TYPE_REQ		1
+#define T263_QUEUE_TYPE_RSP		2
+#define T263_QUEUE_TYPE_ATIO		3
+
+#define T268_BUF_TYPE_EXTD_TRACE	1
+#define T268_BUF_TYPE_EXCH_BUFOFF	2
+#define T268_BUF_TYPE_EXTD_LOGIN	3
+#define T268_BUF_TYPE_REQ_MIRROR	4
+#define T268_BUF_TYPE_RSP_MIRROR	5
+
+#define T274_QUEUE_TYPE_REQ_SHAD	1
+#define T274_QUEUE_TYPE_RSP_SHAD	2
+#define T274_QUEUE_TYPE_ATIO_SHAD	3
+
+#endif
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_version.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_version.h
new file mode 100644
index 0000000..8c4b505
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/qla_version.h
@@ -0,0 +1,15 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+/*
+ * Driver version
+ */
+#define QLA2XXX_VERSION      "10.00.00.01-k"
+
+#define QLA_DRIVER_MAJOR_VER	10
+#define QLA_DRIVER_MINOR_VER	0
+#define QLA_DRIVER_PATCH_VER	0
+#define QLA_DRIVER_BETA_VER	0
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 0000000..e7aee06
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1996 @@
+/*******************************************************************************
+ * This file contains the TCM implementation using the v4 configfs fabric
+ * infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2013 Datera, Inc.
+ *
+ * Author: Nicholas A. Bellinger <nab@daterainc.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/utsname.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+static struct workqueue_struct *tcm_qla2xxx_free_wq;
+static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	u32 nibble;
+	u32 byte = 0;
+	u32 pos = 0;
+	u32 err;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		if (isdigit(c))
+			nibble = c - '0';
+		else if (isxdigit(c) && (islower(c) || !strict))
+			nibble = tolower(c) - 'a' + 10;
+		else
+			goto fail;
+		*wwn = (*wwn << 4) | nibble;
+	}
+	err = 4;
+fail:
+	pr_debug("err %u len %zu pos %u byte %u\n",
+			err, cp - name, pos, byte);
+	return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	u8 b[8];
+
+	put_unaligned_be64(wwn, b);
+	return snprintf(buf, len,
+		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+	return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+	unsigned int i, j;
+	u8 wwn[8];
+
+	memset(wwn, 0, sizeof(wwn));
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		int value;
+
+		value = hex_to_bin(*ns++);
+		if (value >= 0)
+			j = (j << 4) | value;
+		else
+			return -EINVAL;
+
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+
+	*nm = wwn_to_u64(wwn);
+	return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+	const char *name,
+	size_t count,
+	u64 *wwpn,
+	u64 *wwnn)
+{
+	unsigned int cnt = count;
+	int rc;
+
+	*wwpn = 0;
+	*wwnn = 0;
+
+	/* count may include a LF at end of string */
+	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
+		cnt--;
+
+	/* validate we have enough characters for WWPN */
+	if ((cnt != (16+1+16)) || (name[16] != ':'))
+		return -EINVAL;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+	if (rc != 0)
+		return rc;
+
+	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+	if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+	return "qla2xxx_npiv";
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+	return lport->lport_naa_name;
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	return tpg->lport_tpgt;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->tpg_attrib.generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->tpg_attrib.cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->tpg_attrib.fabric_prot_type;
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+			struct qla_tgt_mgmt_cmd, free_work);
+
+	transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	cmd->cmd_in_wq = 0;
+
+	WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
+
+	cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
+	cmd->trc_flags |= TRC_CMD_FREE;
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback.  qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+	cmd->qpair->tgt_counters.core_qla_free_cmd++;
+	cmd->cmd_in_wq = 1;
+
+	WARN_ON(cmd->trc_flags & TRC_CMD_DONE);
+	cmd->trc_flags |= TRC_CMD_DONE;
+
+	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+}
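+/*
+ * Note (editorial): both free paths above defer the final
+ * transport_generic_free_cmd() to tcm_qla2xxx_free_wq because they are
+ * invoked with qla_hw_data->hardware_lock held and must not sleep.
+ * Queueing with queue_work_on(smp_processor_id(), ...) also keeps
+ * completion processing on the CPU that produced it, which helps
+ * cache locality.
+ */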
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd;
+
+	if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
+		cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+		cmd->trc_flags |= TRC_CMD_CHK_STOP;
+	}
+
+	return target_put_sess_cmd(se_cmd);
+}
+
+/*
+ * tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
+ * fabric descriptor for the @se_cmd command
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd;
+
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+				struct qla_tgt_mgmt_cmd, se_cmd);
+		qlt_free_mcmd(mcmd);
+		return;
+	}
+
+	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+	qlt_free_cmd(cmd);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+	struct fc_port  *sess = container_of(kref,
+	    struct fc_port, sess_kref);
+
+	qlt_unreg_sess(sess);
+}
+
+static void tcm_qla2xxx_put_sess(struct fc_port *sess)
+{
+	if (!sess)
+		return;
+
+	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+	kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+	struct fc_port *sess = se_sess->fabric_sess_ptr;
+	struct scsi_qla_host *vha;
+	unsigned long flags;
+
+	BUG_ON(!sess);
+	vha = sess->vha;
+
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	target_sess_cmd_list_set_waiting(se_sess);
+	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+	tcm_qla2xxx_put_sess(sess);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+
+	if (cmd->aborted) {
+		/*
+		 * The command can loop during Q-full; tcm_qla2xxx_aborted_task()
+		 * can get ahead of this command and has already kicked off
+		 * the free.
+		 */
+		pr_debug("write_pending aborted cmd[%p] refcount %d "
+			"transport_state %x, t_state %x, se_cmd_flags %x\n",
+			cmd, kref_read(&cmd->se_cmd.cmd_kref),
+			cmd->se_cmd.transport_state,
+			cmd->se_cmd.t_state,
+			cmd->se_cmd.se_cmd_flags);
+		transport_generic_request_failure(&cmd->se_cmd,
+			TCM_CHECK_CONDITION_ABORT_CMD);
+		return 0;
+	}
+	cmd->trc_flags |= TRC_XFR_RDY;
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+
+	cmd->sg_cnt = se_cmd->t_data_nents;
+	cmd->sg = se_cmd->t_data_sg;
+
+	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+	cmd->prot_sg = se_cmd->t_prot_sg;
+	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
+	se_cmd->pi_err = 0;
+
+	/*
+	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+	 */
+	return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+	/*
+	 * Check for WRITE_PENDING status to determine if we need to wait for
+	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+	 */
+	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+						50);
+		return 0;
+	}
+	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+	return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+	if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+		struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+		return cmd->state;
+	}
+
+	return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+	int data_dir, int bidi)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct se_session *se_sess;
+	struct fc_port *sess;
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+	struct se_portal_group *se_tpg;
+	struct tcm_qla2xxx_tpg *tpg;
+#endif
+	int flags = TARGET_SCF_ACK_KREF;
+
+	if (bidi)
+		flags |= TARGET_SCF_BIDI_OP;
+
+	if (se_cmd->cpuid != WORK_CPU_UNBOUND)
+		flags |= TARGET_SCF_USE_CPUID;
+
+	sess = cmd->sess;
+	if (!sess) {
+		pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n");
+		return -EINVAL;
+	}
+
+	se_sess = sess->se_sess;
+	if (!se_sess) {
+		pr_err("Unable to locate active struct se_session\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+	se_tpg = se_sess->se_tpg;
+	tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
+	if (unlikely(tpg->tpg_attrib.jam_host)) {
+		/* Return without running target_submit_cmd(), discarding the command */
+		return 0;
+	}
+#endif
+
+	cmd->qpair->tgt_counters.qla_core_sbt_cmd++;
+	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+				cmd->unpacked_lun, data_length, fcp_task_attr,
+				data_dir, flags);
+}
+
+static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	/*
+	 * Ensure that the complete FCP WRITE payload has been received.
+	 * Otherwise return an exception via CHECK_CONDITION status.
+	 */
+	cmd->cmd_in_wq = 0;
+
+	cmd->qpair->tgt_counters.qla_core_ret_ctio++;
+	if (!cmd->write_data_transferred) {
+		/*
+		 * Check if se_cmd has already been aborted via LUN_RESET, and
+		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
+		 */
+		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
+			complete(&cmd->se_cmd.t_transport_stop_comp);
+			return;
+		}
+
+		switch (cmd->dif_err_code) {
+		case DIF_ERR_GRD:
+			cmd->se_cmd.pi_err =
+			    TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+			break;
+		case DIF_ERR_REF:
+			cmd->se_cmd.pi_err =
+			    TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+			break;
+		case DIF_ERR_APP:
+			cmd->se_cmd.pi_err =
+			    TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+			break;
+		case DIF_ERR_NONE:
+		default:
+			break;
+		}
+
+		if (cmd->se_cmd.pi_err)
+			transport_generic_request_failure(&cmd->se_cmd,
+				cmd->se_cmd.pi_err);
+		else
+			transport_generic_request_failure(&cmd->se_cmd,
+				TCM_CHECK_CONDITION_ABORT_CMD);
+
+		return;
+	}
+
+	return target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+	cmd->trc_flags |= TRC_DATA_IN;
+	cmd->cmd_in_wq = 1;
+	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
+{
+	return 0;
+}
+
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+    uint16_t *pfw_prot_opts)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+		*pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+		*pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+	return 0;
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
+	uint16_t tmr_func, uint32_t tag)
+{
+	struct fc_port *sess = mcmd->sess;
+	struct se_cmd *se_cmd = &mcmd->se_cmd;
+	int transl_tmr_func = 0;
+	int flags = TARGET_SCF_ACK_KREF;
+
+	switch (tmr_func) {
+	case QLA_TGT_ABTS:
+		pr_debug("%ld: ABTS received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_ABORT_TASK;
+		flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
+		break;
+	case QLA_TGT_2G_ABORT_TASK:
+		pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_ABORT_TASK;
+		break;
+	case QLA_TGT_CLEAR_ACA:
+		pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_CLEAR_ACA;
+		break;
+	case QLA_TGT_TARGET_RESET:
+		pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_TARGET_WARM_RESET;
+		break;
+	case QLA_TGT_LUN_RESET:
+		pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_LUN_RESET;
+		break;
+	case QLA_TGT_CLEAR_TS:
+		pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_CLEAR_TASK_SET;
+		break;
+	case QLA_TGT_ABORT_TS:
+		pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no);
+		transl_tmr_func = TMR_ABORT_TASK_SET;
+		break;
+	default:
+		pr_debug("%ld: Unknown task mgmt fn 0x%x\n",
+		    sess->vha->host_no, tmr_func);
+		return -ENOSYS;
+	}
+
+	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+	    transl_tmr_func, GFP_ATOMIC, tag, flags);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+
+	if (cmd->aborted) {
+		/*
+		 * The command can loop during Q-full; tcm_qla2xxx_aborted_task()
+		 * can get ahead of this command and has already kicked off
+		 * the free.
+		 */
+		pr_debug("queue_data_in aborted cmd[%p] refcount %d "
+			"transport_state %x, t_state %x, se_cmd_flags %x\n",
+			cmd, kref_read(&cmd->se_cmd.cmd_kref),
+			cmd->se_cmd.transport_state,
+			cmd->se_cmd.t_state,
+			cmd->se_cmd.se_cmd_flags);
+		return 0;
+	}
+
+	cmd->trc_flags |= TRC_XMIT_DATA;
+	cmd->bufflen = se_cmd->data_length;
+	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+
+	cmd->sg_cnt = se_cmd->t_data_nents;
+	cmd->sg = se_cmd->t_data_sg;
+	cmd->offset = 0;
+
+	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+	cmd->prot_sg = se_cmd->t_prot_sg;
+	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
+	se_cmd->pi_err = 0;
+
+	/*
+	 * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
+	 */
+	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+				se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+	int xmit_type = QLA_TGT_XMIT_STATUS;
+
+	if (cmd->aborted) {
+		/*
+		 * The command can loop during Q-full; tcm_qla2xxx_aborted_task()
+		 * can get ahead of this command and has already kicked off
+		 * the free.
+		 */
+		pr_debug(
+		    "queue_status aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n",
+		    cmd, kref_read(&cmd->se_cmd.cmd_kref),
+		    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
+		    cmd->se_cmd.se_cmd_flags);
+		return 0;
+	}
+	cmd->bufflen = se_cmd->data_length;
+	cmd->sg = NULL;
+	cmd->sg_cnt = 0;
+	cmd->offset = 0;
+	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+	cmd->trc_flags |= TRC_XMIT_STATUS;
+
+	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+		/*
+		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+		 * for qla_tgt_xmit_response LLD code
+		 */
+		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
+			se_cmd->residual_count = 0;
+		}
+		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+		se_cmd->residual_count += se_cmd->data_length;
+
+		cmd->bufflen = 0;
+	}
+	/*
+	 * Now queue the status response to the qla2xxx LLD code and response ring
+	 */
+	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+				struct qla_tgt_mgmt_cmd, se_cmd);
+
+	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+			mcmd, se_tmr->function, se_tmr->response);
+	/*
+	 * Do translation between TCM TM response codes and
+	 * QLA2xxx FC TM response codes.
+	 */
+	switch (se_tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		mcmd->fc_tm_rsp = FC_TM_REJECT;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+	default:
+		mcmd->fc_tm_rsp = FC_TM_FAILED;
+		break;
+	}
+	/*
+	 * Queue the TM response to QLA2xxx LLD to build a
+	 * CTIO response packet.
+	 */
+	qlt_xmit_tm_rsp(mcmd);
+}
+
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+
+	if (qlt_abort_cmd(cmd))
+		return;
+}
+
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+			struct tcm_qla2xxx_nacl *, struct fc_port *);
+/*
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
+{
+	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+				struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+	void *node;
+
+	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+	if (WARN_ON(node && (node != se_nacl))) {
+		/*
+		 * The nacl no longer matches what we think it should be.
+		 * Most likely a new dynamic acl has been added while
+		 * someone dropped the hardware lock.  It clearly is a
+		 * bug elsewhere, but this bit can't make things worse.
+		 */
+		btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
+			       node, GFP_ATOMIC);
+	}
+
+	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
+	/*
+	 * Now clear the se_nacl and session pointers from our HW lport lookup
+	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+	 *
+	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+	 * target_wait_for_sess_cmds() before the session waits for outstanding
+	 * I/O to complete, to avoid a race between session shutdown execution
+	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
+	 */
+	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
+{
+	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+	target_sess_cmd_list_set_waiting(sess->se_sess);
+}
+
+static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
+		const char *name)
+{
+	struct tcm_qla2xxx_nacl *nacl =
+		container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	u64 wwnn;
+
+	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+		return -EINVAL;
+
+	nacl->nport_wwnn = wwnn;
+	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+
+	return 0;
+}
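+/*
+ * Configfs sketch (editorial; path assumed from the standard target
+ * configfs layout): the "name" parsed above is the ACL directory an
+ * administrator creates under the TPG, and must therefore satisfy the
+ * strict colon-separated format enforced by tcm_qla2xxx_parse_wwn():
+ *
+ *   mkdir -p /sys/kernel/config/target/qla2xxx/<lport_wwn>/tpgt_1/acls/21:00:00:24:ff:31:4c:48
+ */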
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+#define DEF_QLA_TPG_ATTRIB(name)					\
+									\
+static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show(			\
+		struct config_item *item, char *page)			\
+{									\
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
+			struct tcm_qla2xxx_tpg, se_tpg);		\
+									\
+	return sprintf(page, "%u\n", tpg->tpg_attrib.name);	\
+}									\
+									\
+static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store(			\
+		struct config_item *item, const char *page, size_t count) \
+{									\
+	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
+			struct tcm_qla2xxx_tpg, se_tpg);		\
+	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
+	unsigned long val;						\
+	int ret;							\
+									\
+	ret = kstrtoul(page, 0, &val);					\
+	if (ret < 0) {							\
+		pr_err("kstrtoul() failed with"				\
+				" ret: %d\n", ret);			\
+		return -EINVAL;						\
+	}								\
+									\
+	if ((val != 0) && (val != 1)) {					\
+		pr_err("Illegal boolean value %lu\n", val);		\
+		return -EINVAL;						\
+	}								\
+									\
+	a->name = val;							\
+									\
+	return count;							\
+}									\
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name)
+
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+DEF_QLA_TPG_ATTRIB(jam_host);
+#endif
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+	&tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
+	&tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls,
+	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
+	&tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
+	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+	&tcm_qla2xxx_tpg_attrib_attr_jam_host,
+#endif
+	NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
+		char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+
+	return snprintf(page, PAGE_SIZE, "%d\n",
+			atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	unsigned long op;
+	int rc;
+
+	rc = kstrtoul(page, 0, &op);
+	if (rc < 0) {
+		pr_err("kstrtoul() returned %d\n", rc);
+		return -EINVAL;
+	}
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %lu\n", op);
+		return -EINVAL;
+	}
+	if (op) {
+		if (atomic_read(&tpg->lport_tpg_enabled))
+			return -EEXIST;
+
+		atomic_set(&tpg->lport_tpg_enabled, 1);
+		qlt_enable_vha(vha);
+	} else {
+		if (!atomic_read(&tpg->lport_tpg_enabled))
+			return count;
+
+		atomic_set(&tpg->lport_tpg_enabled, 0);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
+	}
+
+	return count;
+}
+
+static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item,
+		char *page)
+{
+	return target_show_dynamic_sessions(to_tpg(item), page);
+}
+
+static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	unsigned long val;
+	int ret = kstrtoul(page, 0, &val);
+
+	if (ret) {
+		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+		return ret;
+	}
+	if (val != 0 && val != 1 && val != 3) {
+		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+		return -EINVAL;
+	}
+	tpg->tpg_attrib.fabric_prot_type = val;
+
+	return count;
+}
+
+static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
+		char *page)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+
+	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
+}
+
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
+CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+	&tcm_qla2xxx_tpg_attr_enable,
+	&tcm_qla2xxx_tpg_attr_dynamic_sessions,
+	&tcm_qla2xxx_tpg_attr_fabric_prot_type,
+	NULL,
+};
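+/*
+ * Configfs sketch (editorial; paths assumed from the standard target
+ * configfs layout): with the attribute tables above registered, a TPG is
+ * typically configured and enabled from userspace as:
+ *
+ *   mkdir -p /sys/kernel/config/target/qla2xxx/<lport_wwn>/tpgt_1
+ *   echo 0 > .../tpgt_1/attrib/demo_mode_write_protect
+ *   echo 1 > .../tpgt_1/enable
+ *
+ * where the "enable" store above calls qlt_enable_vha(), and a second
+ * write of 1 returns -EEXIST.
+ */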
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	if (tpgt != 1) {
+		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+	 * NodeACLs
+	 */
+	tpg->tpg_attrib.generate_node_acls = 1;
+	tpg->tpg_attrib.demo_mode_write_protect = 1;
+	tpg->tpg_attrib.cache_dynamic_acls = 1;
+	tpg->tpg_attrib.demo_mode_login_only = 1;
+	tpg->tpg_attrib.jam_host = 0;
+
+	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+
+	lport->tpg_1 = tpg;
+
+	return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	struct scsi_qla_host *vha = lport->qla_vha;
+	/*
+	 * Call into qla_target.c LLD logic to shutdown the active
+	 * FC Nexuses and disable target mode operation for this qla_hw_data
+	 */
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+
+	core_tpg_deregister(se_tpg);
+	/*
+	 * Clear local TPG=1 pointer for non NPIV mode.
+	 */
+	lport->tpg_1 = NULL;
+	kfree(tpg);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item,
+		char *page)
+{
+	return tcm_qla2xxx_tpg_enable_show(item, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct se_portal_group *se_tpg = to_tpg(item);
+	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+			struct tcm_qla2xxx_tpg, se_tpg);
+	unsigned long op;
+	int rc;
+
+	rc = kstrtoul(page, 0, &op);
+	if (rc < 0) {
+		pr_err("kstrtoul() returned %d\n", rc);
+		return -EINVAL;
+	}
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %lu\n", op);
+		return -EINVAL;
+	}
+	if (op) {
+		if (atomic_read(&tpg->lport_tpg_enabled))
+			return -EEXIST;
+
+		atomic_set(&tpg->lport_tpg_enabled, 1);
+		qlt_enable_vha(vha);
+	} else {
+		if (!atomic_read(&tpg->lport_tpg_enabled))
+			return count;
+
+		atomic_set(&tpg->lport_tpg_enabled, 0);
+		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
+	}
+
+	return count;
+}
+
+CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+	&tcm_qla2xxx_npiv_tpg_attr_enable,
+	NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct tcm_qla2xxx_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tpg->lport = lport;
+	tpg->lport_tpgt = tpgt;
+
+	/*
+	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+	 * NodeACLs
+	 */
+	tpg->tpg_attrib.generate_node_acls = 1;
+	tpg->tpg_attrib.demo_mode_write_protect = 1;
+	tpg->tpg_attrib.cache_dynamic_acls = 1;
+	tpg->tpg_attrib.demo_mode_login_only = 1;
+
+	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
+	if (ret < 0) {
+		kfree(tpg);
+		return NULL;
+	}
+	lport->tpg_1 = tpg;
+	return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
+ */
+static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(
+	scsi_qla_host_t *vha,
+	const uint8_t *s_id)
+{
+	struct tcm_qla2xxx_lport *lport;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_nacl *nacl;
+	u32 key;
+
+	lport = vha->vha_tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return NULL;
+	}
+
+	key = sid_to_key(s_id);
+	pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+	if (!se_nacl) {
+		pr_debug("Unable to locate s_id: 0x%06x\n", key);
+		return NULL;
+	}
+	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+	    se_nacl, se_nacl->initiatorname);
+
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+	if (!nacl->fc_port) {
+		pr_err("Unable to locate struct fc_port\n");
+		return NULL;
+	}
+
+	return nacl->fc_port;
+}
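+/*
+ * Sketch of the key packing assumed above (editorial): sid_to_key() from
+ * qla_target.h folds the 3-byte big-endian FC S_ID into a 24-bit btree
+ * key, equivalent to the illustrative helper below:
+ *
+ *	static inline u32 example_sid_to_key(const uint8_t *s_id)
+ *	{
+ *		return (s_id[0] << 16) | (s_id[1] << 8) | s_id[2];
+ *	}
+ */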
+
+/*
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+	struct tcm_qla2xxx_lport *lport,
+	struct se_node_acl *new_se_nacl,
+	struct tcm_qla2xxx_nacl *nacl,
+	struct se_session *se_sess,
+	struct fc_port *fc_port,
+	uint8_t *s_id)
+{
+	u32 key;
+	void *slot;
+	int rc;
+
+	key = sid_to_key(s_id);
+	pr_debug("set_sess_by_s_id: %06x\n", key);
+
+	slot = btree_lookup32(&lport->lport_fcport_map, key);
+	if (!slot) {
+		if (new_se_nacl) {
+			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+			nacl->nport_id = key;
+			rc = btree_insert32(&lport->lport_fcport_map, key,
+					new_se_nacl, GFP_ATOMIC);
+			if (rc)
+				pr_err("Unable to insert s_id into fcport_map: %06x\n",
+				    (int)key);
+		} else {
+			pr_debug("Wiping nonexisting fc_port entry\n");
+		}
+
+		fc_port->se_sess = se_sess;
+		nacl->fc_port = fc_port;
+		return;
+	}
+
+	if (nacl->fc_port) {
+		if (new_se_nacl == NULL) {
+			pr_debug("Clearing existing nacl->fc_port and fc_port entry\n");
+			btree_remove32(&lport->lport_fcport_map, key);
+			nacl->fc_port = NULL;
+			return;
+		}
+		pr_debug("Replacing existing nacl->fc_port and fc_port entry\n");
+		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+		fc_port->se_sess = se_sess;
+		nacl->fc_port = fc_port;
+		return;
+	}
+
+	if (new_se_nacl == NULL) {
+		pr_debug("Clearing existing fc_port entry\n");
+		btree_remove32(&lport->lport_fcport_map, key);
+		return;
+	}
+
+	pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n");
+	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+	fc_port->se_sess = se_sess;
+	nacl->fc_port = fc_port;
+
+	pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n",
+	    nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
+ */
+static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id(
+	scsi_qla_host_t *vha,
+	const uint16_t loop_id)
+{
+	struct tcm_qla2xxx_lport *lport;
+	struct se_node_acl *se_nacl;
+	struct tcm_qla2xxx_nacl *nacl;
+	struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+	lport = vha->vha_tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return NULL;
+	}
+
+	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+	fc_loopid = lport->lport_loopid_map + loop_id;
+	se_nacl = fc_loopid->se_nacl;
+	if (!se_nacl) {
+		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+		    loop_id);
+		return NULL;
+	}
+
+	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+	if (!nacl->fc_port) {
+		pr_err("Unable to locate struct fc_port\n");
+		return NULL;
+	}
+
+	return nacl->fc_port;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->tgt.sess_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+	struct tcm_qla2xxx_lport *lport,
+	struct se_node_acl *new_se_nacl,
+	struct tcm_qla2xxx_nacl *nacl,
+	struct se_session *se_sess,
+	struct fc_port *fc_port,
+	uint16_t loop_id)
+{
+	struct se_node_acl *saved_nacl;
+	struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+			lport->lport_loopid_map)[loop_id];
+
+	saved_nacl = fc_loopid->se_nacl;
+	if (!saved_nacl) {
+		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+		fc_loopid->se_nacl = new_se_nacl;
+		if (fc_port->se_sess != se_sess)
+			fc_port->se_sess = se_sess;
+		if (nacl->fc_port != fc_port)
+			nacl->fc_port = fc_port;
+		return;
+	}
+
+	if (nacl->fc_port) {
+		if (new_se_nacl == NULL) {
+			pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n");
+			fc_loopid->se_nacl = NULL;
+			nacl->fc_port = NULL;
+			return;
+		}
+
+		pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n");
+		fc_loopid->se_nacl = new_se_nacl;
+		if (fc_port->se_sess != se_sess)
+			fc_port->se_sess = se_sess;
+		if (nacl->fc_port != fc_port)
+			nacl->fc_port = fc_port;
+		return;
+	}
+
+	if (new_se_nacl == NULL) {
+		pr_debug("Clearing fc_loopid->se_nacl\n");
+		fc_loopid->se_nacl = NULL;
+		return;
+	}
+
+	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n");
+	fc_loopid->se_nacl = new_se_nacl;
+	if (fc_port->se_sess != se_sess)
+		fc_port->se_sess = se_sess;
+	if (nacl->fc_port != fc_port)
+		nacl->fc_port = fc_port;
+
+	pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+	    nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Should always be called with qla_hw_data->tgt.sess_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+		struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
+{
+	struct se_session *se_sess = sess->se_sess;
+	unsigned char be_sid[3];
+
+	be_sid[0] = sess->d_id.b.domain;
+	be_sid[1] = sess->d_id.b.area;
+	be_sid[2] = sess->d_id.b.al_pa;
+
+	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+				sess, be_sid);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+				sess, sess->loop_id);
+}
+
+static void tcm_qla2xxx_free_session(struct fc_port *sess)
+{
+	struct qla_tgt *tgt = sess->tgt;
+	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	struct se_session *se_sess;
+	struct tcm_qla2xxx_lport *lport;
+
+	BUG_ON(in_interrupt());
+
+	se_sess = sess->se_sess;
+	if (!se_sess) {
+		pr_err("struct fc_port->se_sess is NULL\n");
+		dump_stack();
+		return;
+	}
+
+	lport = vha->vha_tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return;
+	}
+	target_wait_for_sess_cmds(se_sess);
+
+	transport_deregister_session_configfs(sess->se_sess);
+	transport_deregister_session(sess->se_sess);
+}
+
+static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
+				  struct se_session *se_sess, void *p)
+{
+	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+				struct tcm_qla2xxx_tpg, se_tpg);
+	struct tcm_qla2xxx_lport *lport = tpg->lport;
+	struct qla_hw_data *ha = lport->qla_vha->hw;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+				struct tcm_qla2xxx_nacl, se_node_acl);
+	struct fc_port *qlat_sess = p;
+	uint16_t loop_id = qlat_sess->loop_id;
+	unsigned long flags;
+	unsigned char be_sid[3];
+
+	be_sid[0] = qlat_sess->d_id.b.domain;
+	be_sid[1] = qlat_sess->d_id.b.area;
+	be_sid[2] = qlat_sess->d_id.b.al_pa;
+
+	/*
+	 * And now setup se_nacl and session pointers into HW lport internal
+	 * mappings for fabric S_ID and LOOP_ID.
+	 */
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
+				     se_sess, qlat_sess, be_sid);
+	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
+					se_sess, qlat_sess, loop_id);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+	scsi_qla_host_t *vha,
+	unsigned char *fc_wwpn,
+	struct fc_port *qlat_sess)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport;
+	struct tcm_qla2xxx_tpg *tpg;
+	struct se_session *se_sess;
+	unsigned char port_name[36];
+	int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
+		       TCM_QLA2XXX_DEFAULT_TAGS;
+
+	lport = vha->vha_tgt.target_lport_ptr;
+	if (!lport) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+		dump_stack();
+		return -EINVAL;
+	}
+	/*
+	 * Locate the TPG=1 reference.
+	 */
+	tpg = lport->tpg_1;
+	if (!tpg) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
+		return -EINVAL;
+	}
+	/*
+	 * Format the FCP Initiator port_name into colon-separated values to
+	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+	 */
+	memset(port_name, 0, sizeof(port_name));
+	snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
+	/*
+	 * Locate our struct se_node_acl either from an explicit NodeACL created
+	 * via ConfigFS, or via running in TPG demo mode.
+	 */
+	se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+				       sizeof(struct qla_tgt_cmd),
+				       TARGET_PROT_ALL, port_name,
+				       qlat_sess, tcm_qla2xxx_session_cb);
+	if (IS_ERR(se_sess))
+		return PTR_ERR(se_sess);
+
+	return 0;
+}
+
+static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
+				    uint16_t loop_id, bool conf_compl_supported)
+{
+	struct qla_tgt *tgt = sess->tgt;
+	struct qla_hw_data *ha = tgt->ha;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
+	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+			struct tcm_qla2xxx_nacl, se_node_acl);
+	u32 key;
+
+	if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24)
+		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
+		    sess, sess->port_name,
+		    sess->loop_id, loop_id, sess->d_id.b.domain,
+		    sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain,
+		    s_id.b.area, s_id.b.al_pa);
+
+	if (sess->loop_id != loop_id) {
+		/*
+		 * Because we can shuffle loop IDs around and we
+		 * update different sessions non-atomically, we might
+		 * have overwritten this session's old loop ID
+		 * already, and we might end up overwriting some other
+		 * session that will be updated later.  So we have to
+		 * be extra careful and we can't warn about those things...
+		 */
+		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
+			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
+
+		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
+
+		sess->loop_id = loop_id;
+	}
+
+	if (sess->d_id.b24 != s_id.b24) {
+		key = (((u32) sess->d_id.b.domain << 16) |
+		       ((u32) sess->d_id.b.area   <<  8) |
+		       ((u32) sess->d_id.b.al_pa));
+
+		if (btree_lookup32(&lport->lport_fcport_map, key))
+			WARN(btree_remove32(&lport->lport_fcport_map, key) !=
+			    se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n",
+			    sess->d_id.b.domain, sess->d_id.b.area,
+			    sess->d_id.b.al_pa);
+		else
+			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
+			     sess->d_id.b.domain, sess->d_id.b.area,
+			     sess->d_id.b.al_pa);
+
+		key = (((u32) s_id.b.domain << 16) |
+		       ((u32) s_id.b.area   <<  8) |
+		       ((u32) s_id.b.al_pa));
+
+		if (btree_lookup32(&lport->lport_fcport_map, key)) {
+			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
+			     s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+			btree_update32(&lport->lport_fcport_map, key, se_nacl);
+		} else {
+			btree_insert32(&lport->lport_fcport_map, key, se_nacl,
+			    GFP_ATOMIC);
+		}
+
+		sess->d_id = s_id;
+		nacl->nport_id = key;
+	}
+
+	sess->conf_compl_supported = conf_compl_supported;
+
+	/* Reset logout parameters to default */
+	sess->logout_on_delete = 1;
+	sess->keep_nport_handle = 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+	.handle_cmd		= tcm_qla2xxx_handle_cmd,
+	.handle_data		= tcm_qla2xxx_handle_data,
+	.handle_tmr		= tcm_qla2xxx_handle_tmr,
+	.free_cmd		= tcm_qla2xxx_free_cmd,
+	.free_mcmd		= tcm_qla2xxx_free_mcmd,
+	.free_session		= tcm_qla2xxx_free_session,
+	.update_sess		= tcm_qla2xxx_update_sess,
+	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
+	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
+	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+	.put_sess		= tcm_qla2xxx_put_sess,
+	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
+	.get_dif_tags		= tcm_qla2xxx_dif_tags,
+	.chk_dif_tags		= tcm_qla2xxx_chk_dif_tags,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+	int rc;
+
+	rc = btree_init32(&lport->lport_fcport_map);
+	if (rc) {
+		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+		return rc;
+	}
+
+	lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+				65536);
+	if (!lport->lport_loopid_map) {
+		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+		    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+		btree_destroy32(&lport->lport_fcport_map);
+		return -ENOMEM;
+	}
+	memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+	       * 65536);
+	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+	       sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+	return 0;
+}
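+/*
+ * Size note (editorial): the loop-ID map covers the full 16-bit loop ID
+ * space, so with one se_nacl pointer per entry this allocation is
+ * 65536 * sizeof(void *) bytes (512 KiB on 64-bit), hence vmalloc()
+ * rather than kmalloc().
+ */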
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
+					 void *target_lport_ptr,
+					 u64 npiv_wwpn, u64 npiv_wwnn)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
+	/*
+	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
+	 */
+	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
+	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+	lport->qla_vha = vha;
+
+	return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport;
+	u64 wwpn;
+	int ret = -ENODEV;
+
+	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_wwpn = wwpn;
+	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+				wwpn);
+	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
+
+	ret = tcm_qla2xxx_init_lport(lport);
+	if (ret != 0)
+		goto out;
+
+	ret = qlt_lport_register(lport, wwpn, 0, 0,
+				 tcm_qla2xxx_lport_register_cb);
+	if (ret != 0)
+		goto out_lport;
+
+	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	btree_destroy32(&lport->lport_fcport_map);
+out:
+	kfree(lport);
+	return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *vha = lport->qla_vha;
+	struct se_node_acl *node;
+	u32 key = 0;
+
+	/*
+	 * Call into qla_target.c LLD logic to complete the
+	 * shutdown of struct qla_tgt after the call to
+	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
+	 */
+	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
+		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
+
+	qlt_lport_deregister(vha);
+
+	vfree(lport->lport_loopid_map);
+	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+		btree_remove32(&lport->lport_fcport_map, key);
+	btree_destroy32(&lport->lport_fcport_map);
+	kfree(lport);
+}
+
+static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
+					      void *target_lport_ptr,
+					      u64 npiv_wwpn, u64 npiv_wwnn)
+{
+	struct fc_vport *vport;
+	struct Scsi_Host *sh = base_vha->host;
+	struct scsi_qla_host *npiv_vha;
+	struct tcm_qla2xxx_lport *lport =
+			(struct tcm_qla2xxx_lport *)target_lport_ptr;
+	struct tcm_qla2xxx_lport *base_lport =
+			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+	struct fc_vport_identifiers vport_id;
+
+	if (qla_ini_mode_enabled(base_vha)) {
+		pr_err("qla2xxx base_vha not enabled for target mode\n");
+		return -EPERM;
+	}
+
+	if (!base_lport || !base_lport->tpg_1 ||
+	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+		pr_err("qla2xxx base_lport or tpg_1 not available\n");
+		return -EPERM;
+	}
+
+	memset(&vport_id, 0, sizeof(vport_id));
+	vport_id.port_name = npiv_wwpn;
+	vport_id.node_name = npiv_wwnn;
+	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+	vport_id.vport_type = FC_PORTTYPE_NPIV;
+	vport_id.disable = false;
+
+	vport = fc_vport_create(sh, 0, &vport_id);
+	if (!vport) {
+		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
+		return -ENODEV;
+	}
+	/*
+	 * Setup local pointer to NPIV vhba + target_lport_ptr
+	 */
+	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
+	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
+	lport->qla_vha = npiv_vha;
+	scsi_host_get(npiv_vha->host);
+	return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_qla2xxx_lport *lport;
+	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
+	char *p, tmp[128];
+	int ret;
+
+	snprintf(tmp, 128, "%s", name);
+
+	p = strchr(tmp, '@');
+	if (!p) {
+		pr_err("Unable to locate NPIV '@' separator\n");
+		return ERR_PTR(-EINVAL);
+	}
+	*p++ = '\0';
+
+	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
+				       &npiv_wwpn, &npiv_wwnn) < 0)
+		return ERR_PTR(-EINVAL);
+
+	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+	if (!lport) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	lport->lport_npiv_wwpn = npiv_wwpn;
+	lport->lport_npiv_wwnn = npiv_wwnn;
+	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
+
+	ret = tcm_qla2xxx_init_lport(lport);
+	if (ret != 0)
+		goto out;
+
+	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
+				 tcm_qla2xxx_lport_register_npiv_cb);
+	if (ret != 0)
+		goto out_lport;
+
+	return &lport->lport_wwn;
+out_lport:
+	vfree(lport->lport_loopid_map);
+	btree_destroy32(&lport->lport_fcport_map);
+out:
+	kfree(lport);
+	return ERR_PTR(ret);
+}
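+/*
+ * Configfs sketch (editorial; path assumed from the standard target
+ * configfs layout): the NPIV lport directory name combines both WWN
+ * formats parsed above: the physical WWPN in strict colon-separated form,
+ * an '@' separator, then the bare-hex NPIV "wwpn:wwnn" pair:
+ *
+ *   mkdir /sys/kernel/config/target/qla2xxx_npiv/21:00:00:24:ff:31:4c:48@2100001b32a9da4e:2000001b32a9da4e
+ */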
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+	struct tcm_qla2xxx_lport *lport = container_of(wwn,
+			struct tcm_qla2xxx_lport, lport_wwn);
+	struct scsi_qla_host *npiv_vha = lport->qla_vha;
+	struct qla_hw_data *ha = npiv_vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	scsi_host_put(npiv_vha->host);
+	/*
+	 * Notify the FC transport that we want to release the vha->fc_vport
+	 */
+	fc_vport_terminate(npiv_vha->fc_vport);
+	scsi_host_put(base_vha->host);
+	kfree(lport);
+}
+
+static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
+		char *page)
+{
+	return sprintf(page,
+	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n",
+	    QLA2XXX_VERSION, utsname()->sysname,
+	    utsname()->machine, utsname()->release);
+}
+
+CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+	&tcm_qla2xxx_wwn_attr_version,
+	NULL,
+};
+
+static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
+	.module				= THIS_MODULE,
+	.name				= "qla2xxx",
+	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
+	/*
+	 * XXX: Limit assumes single page per scatter-gather-list entry.
+	 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
+	 */
+	.max_data_sg_nents		= 1200,
+	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
+	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
+	.tpg_get_tag			= tcm_qla2xxx_get_tag,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect =
+					tcm_qla2xxx_check_demo_write_protect,
+	.tpg_check_prod_mode_write_protect =
+					tcm_qla2xxx_check_prod_write_protect,
+	.tpg_check_prot_fabric_only	= tcm_qla2xxx_check_prot_fabric_only,
+	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
+	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.check_stop_free		= tcm_qla2xxx_check_stop_free,
+	.release_cmd			= tcm_qla2xxx_release_cmd,
+	.close_session			= tcm_qla2xxx_close_session,
+	.sess_get_index			= tcm_qla2xxx_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_qla2xxx_write_pending,
+	.write_pending_status		= tcm_qla2xxx_write_pending_status,
+	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
+	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
+	.queue_data_in			= tcm_qla2xxx_queue_data_in,
+	.queue_status			= tcm_qla2xxx_queue_status,
+	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.aborted_task			= tcm_qla2xxx_aborted_task,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
+	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
+	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
+	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
+	.fabric_init_nodeacl		= tcm_qla2xxx_init_nodeacl,
+
+	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
+	.tfc_tpg_base_attrs		= tcm_qla2xxx_tpg_attrs,
+	.tfc_tpg_attrib_attrs		= tcm_qla2xxx_tpg_attrib_attrs,
+};
+
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+	.module				= THIS_MODULE,
+	.name				= "qla2xxx_npiv",
+	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
+	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
+	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
+	.tpg_get_tag			= tcm_qla2xxx_get_tag,
+	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
+	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
+	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
+	.tpg_check_prod_mode_write_protect =
+	    tcm_qla2xxx_check_prod_write_protect,
+	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
+	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
+	.check_stop_free                = tcm_qla2xxx_check_stop_free,
+	.release_cmd			= tcm_qla2xxx_release_cmd,
+	.close_session			= tcm_qla2xxx_close_session,
+	.sess_get_index			= tcm_qla2xxx_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= tcm_qla2xxx_write_pending,
+	.write_pending_status		= tcm_qla2xxx_write_pending_status,
+	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
+	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
+	.queue_data_in			= tcm_qla2xxx_queue_data_in,
+	.queue_status			= tcm_qla2xxx_queue_status,
+	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
+	.aborted_task			= tcm_qla2xxx_aborted_task,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
+	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
+	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
+	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
+	.fabric_init_nodeacl		= tcm_qla2xxx_init_nodeacl,
+
+	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
+	.tfc_tpg_base_attrs		= tcm_qla2xxx_npiv_tpg_attrs,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+	int ret;
+
+	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n",
+	    QLA2XXX_VERSION, utsname()->sysname,
+	    utsname()->machine, utsname()->release);
+
+	ret = target_register_template(&tcm_qla2xxx_ops);
+	if (ret)
+		return ret;
+
+	ret = target_register_template(&tcm_qla2xxx_npiv_ops);
+	if (ret)
+		goto out_fabric;
+
+	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+						WQ_MEM_RECLAIM, 0);
+	if (!tcm_qla2xxx_free_wq) {
+		ret = -ENOMEM;
+		goto out_fabric_npiv;
+	}
+
+	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+	if (!tcm_qla2xxx_cmd_wq) {
+		ret = -ENOMEM;
+		goto out_free_wq;
+	}
+
+	return 0;
+
+out_free_wq:
+	destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+	target_unregister_template(&tcm_qla2xxx_npiv_ops);
+out_fabric:
+	target_unregister_template(&tcm_qla2xxx_ops);
+	return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+	destroy_workqueue(tcm_qla2xxx_cmd_wq);
+	destroy_workqueue(tcm_qla2xxx_free_wq);
+
+	target_unregister_template(&tcm_qla2xxx_ops);
+	target_unregister_template(&tcm_qla2xxx_npiv_ops);
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+	int ret;
+
+	ret = tcm_qla2xxx_register_configfs();
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+	tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 0000000..147cf6c
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN	32
+/*
+ * Number of pre-allocated per-session tags, based upon the worst-case
+ * per port number of iocbs
+ */
+#define TCM_QLA2XXX_DEFAULT_TAGS 2088
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+	struct se_node_acl se_node_acl;
+
+	/* From libfc struct fc_rport->port_id */
+	u32 nport_id;
+	/* Binary World Wide unique Node Name for remote FC Initiator Nport */
+	u64 nport_wwnn;
+	/* ASCII formatted WWPN for FC Initiator Nport */
+	char nport_name[TCM_QLA2XXX_NAMELEN];
+	/* Pointer to fc_port */
+	struct fc_port *fc_port;
+	/* Pointer to TCM FC nexus */
+	struct se_session *nport_nexus;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+	int generate_node_acls;
+	int cache_dynamic_acls;
+	int demo_mode_write_protect;
+	int prod_mode_write_protect;
+	int demo_mode_login_only;
+	int fabric_prot_type;
+	int jam_host;
+};
+
+struct tcm_qla2xxx_tpg {
+	/* FC lport target portal group tag for TCM */
+	u16 lport_tpgt;
+	/* Atomic bit to determine TPG active status */
+	atomic_t lport_tpg_enabled;
+	/* Pointer back to tcm_qla2xxx_lport */
+	struct tcm_qla2xxx_lport *lport;
+	/* Used by tcm_qla2xxx_tpg_attrib_cit */
+	struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+	/* Returned by tcm_qla2xxx_make_tpg() */
+	struct se_portal_group se_tpg;
+};
+
+struct tcm_qla2xxx_fc_loopid {
+	struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+	/* Binary World Wide unique Port Name for FC Target Lport */
+	u64 lport_wwpn;
+	/* Binary World Wide unique Port Name for FC NPIV Target Lport */
+	u64 lport_npiv_wwpn;
+	/* Binary World Wide unique Node Name for FC NPIV Target Lport */
+	u64 lport_npiv_wwnn;
+	/* ASCII formatted WWPN for FC Target Lport */
+	char lport_name[TCM_QLA2XXX_NAMELEN];
+	/* ASCII formatted naa WWPN for VPD page 83 etc */
+	char lport_naa_name[TCM_QLA2XXX_NAMELEN];
+	/* map for fc_port pointers in 24-bit FC Port ID space */
+	struct btree_head32 lport_fcport_map;
+	/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+	struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+	/* Pointer to struct scsi_qla_host from qla2xxx LLD */
+	struct scsi_qla_host *qla_vha;
+	/* Embedded struct qla_tgt for this lport */
+	struct qla_tgt lport_qla_tgt;
+	/* Pointer to TPG=1 for non NPIV mode */
+	struct tcm_qla2xxx_tpg *tpg_1;
+	/* Returned by tcm_qla2xxx_make_lport() */
+	struct se_wwn lport_wwn;
+};