[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/Makefile b/src/kernel/linux/v4.14/drivers/scsi/lpfc/Makefile
new file mode 100644
index 0000000..cb6aa80
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/Makefile
@@ -0,0 +1,36 @@
+#/*******************************************************************
+# * This file is part of the Emulex Linux Device Driver for         *
+# * Fibre Channel Host Bus Adapters.                                *
+# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+# * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+# * EMULEX and SLI are trademarks of Emulex.                        *
+# * www.broadcom.com                                                *
+# *                                                                 *
+# * This program is free software; you can redistribute it and/or   *
+# * modify it under the terms of version 2 of the GNU General       *
+# * Public License as published by the Free Software Foundation.    *
+# * This program is distributed in the hope that it will be useful. *
+# * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+# * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+# * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+# * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+# * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+# * more details, a copy of which can be found in the file COPYING  *
+# * included with this package.                                     *
+# *******************************************************************/
+######################################################################
+
+ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
+ccflags-$(GCOV) += -O0
+
+ifdef WARNINGS_BECOME_ERRORS
+ccflags-y += -Werror
+endif
+
+obj-$(CONFIG_SCSI_LPFC) := lpfc.o
+
+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
+	lpfc_hbadisc.o	lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o   \
+	lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
+	lpfc_nvme.o lpfc_nvmet.o
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc.h
new file mode 100644
index 0000000..5fc41aa
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc.h
@@ -0,0 +1,1256 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <scsi/scsi_host.h>
+#include <linux/ktime.h>
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
+struct lpfc_sli2_slim;
+
+#define ELX_MODEL_NAME_SIZE	80
+
+#define LPFC_PCI_DEV_LP		0x1
+#define LPFC_PCI_DEV_OC		0x2
+
+#define LPFC_SLI_REV2		2
+#define LPFC_SLI_REV3		3
+#define LPFC_SLI_REV4		4
+
+#define LPFC_MAX_TARGET		4096	/* max number of targets supported */
+#define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
+					   requests */
+#define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
+					   the NameServer  before giving up. */
+#define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
+#define LPFC_DEFAULT_SG_SEG_CNT 64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
+		cmnd for menlo; nearly twice the default is needed for
+		firmware downloads using bsg */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd  */
+#define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
+#define LPFC_MAX_NVME_SEG_CNT	128	/* max SGL element cnt per NVME cmnd */
+
+#define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
+#define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
+#define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
+#define LPFC_TGTQ_INTERVAL	40000	/* Min amount of time between tgt
+					   queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH	10
+#define LPFC_MAX_TGT_QDEPTH	0xFFFF
+
+#define  LPFC_MAX_BUCKET_COUNT 20	/* Maximum no. of buckets for stat data
+					   collection. */
+/*
+ * The following time intervals are used for adjusting SCSI device
+ * queue depths when there is a driver resource error or a firmware
+ * resource error.
+ */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
+
+/* Number of exchanges reserved for discovery to complete */
+#define LPFC_DISC_IOCB_BUFF_COUNT 20
+
+#define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
+#define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout  in seconds. */
+
+#define LPFC_LOOK_AHEAD_OFF	0	/* Look ahead logic is turned off */
+
+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL	5 /* ERATT poll interval in seconds */
+
+/* Define macros for 64 bit support */
+#define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
+#define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
+#define getPaddr(high, low)  ((dma_addr_t)( \
+			     (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
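+
+/*
+ * Usage sketch (illustrative values, not driver code): a 64-bit DMA
+ * address split with the macros above and reassembled:
+ *
+ *	dma_addr_t dma  = 0x123456789abcdef0ULL;
+ *	uint32_t   lo   = putPaddrLow(dma);	-> 0x9abcdef0
+ *	uint32_t   hi   = putPaddrHigh(dma);	-> 0x12345678
+ *	dma_addr_t back = getPaddr(hi, lo);	-> equals dma again
+ */
+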
+/* Provide maximum configuration definitions. */
+#define LPFC_DRVR_TIMEOUT	16	/* driver iocb timeout value in sec */
+#define FC_MAX_ADPTMSG		64
+
+#define MAX_HBAEVT	32
+#define MAX_HBAS_NO_RESET 16
+
+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS	2
+
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		0	/* bit 0 */
+
+/* queue dump line buffer size */
+#define LPFC_LBUF_SZ		128
+
+/* mailbox system shutdown options */
+#define LPFC_MBX_NO_WAIT	0
+#define LPFC_MBX_WAIT		1
+
+enum lpfc_polling_flags {
+	ENABLE_FCP_RING_POLLING = 0x1,
+	DISABLE_FCP_RING_INT    = 0x2
+};
+
+struct perf_prof {
+	uint16_t cmd_cpu[40];
+	uint16_t rsp_cpu[40];
+	uint16_t qh_cpu[40];
+	uint16_t wqidx[40];
+};
+
+/*
+ * Provide for FC4 TYPE x28 - NVME.  FCP (TYPE x8) and NVME (TYPE x28)
+ * use the identical bit mask (bit 8) because their type codes are
+ * 32 bit positions apart, placing them at the same bit offset in
+ * adjacent 32-bit words of the FC-GS type bitmap.
+ */
+#define LPFC_FC4_TYPE_BITMASK	0x00000100
+
+/* Provide DMA memory definitions the driver uses per port instance. */
+struct lpfc_dmabuf {
+	struct list_head list;
+	void *virt;		/* virtual address ptr */
+	dma_addr_t phys;	/* mapped address */
+	uint32_t   buffer_tag;	/* used for tagged queue ring */
+};
+
+struct lpfc_nvmet_ctxbuf {
+	struct list_head list;
+	struct lpfc_nvmet_rcv_ctx *context;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_sglq *sglq;
+};
+
+struct lpfc_dma_pool {
+	struct lpfc_dmabuf   *elements;
+	uint32_t    max_count;
+	uint32_t    current_count;
+};
+
+struct hbq_dmabuf {
+	struct lpfc_dmabuf hbuf;
+	struct lpfc_dmabuf dbuf;
+	uint16_t total_size;
+	uint16_t bytes_recv;
+	uint32_t tag;
+	struct lpfc_cq_event cq_event;
+	unsigned long time_stamp;
+	void *context;
+};
+
+struct rqb_dmabuf {
+	struct lpfc_dmabuf hbuf;
+	struct lpfc_dmabuf dbuf;
+	uint16_t total_size;
+	uint16_t bytes_recv;
+	uint16_t idx;
+	struct lpfc_queue *hrq;	  /* ptr to associated Header RQ */
+	struct lpfc_queue *drq;	  /* ptr to associated Data RQ */
+};
+
+/* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
+#define MEM_PRI		0x100
+
+
+/****************************************************************************/
+/*      Device VPD save area                                                */
+/****************************************************************************/
+typedef struct lpfc_vpd {
+	uint32_t status;	/* vpd status value */
+	uint32_t length;	/* number of bytes actually returned */
+	struct {
+		uint32_t rsvd1;	/* Revision numbers */
+		uint32_t biuRev;
+		uint32_t smRev;
+		uint32_t smFwRev;
+		uint32_t endecRev;
+		uint16_t rBit;
+		uint8_t fcphHigh;
+		uint8_t fcphLow;
+		uint8_t feaLevelHigh;
+		uint8_t feaLevelLow;
+		uint32_t postKernRev;
+		uint32_t opFwRev;
+		uint8_t opFwName[16];
+		uint32_t sli1FwRev;
+		uint8_t sli1FwName[16];
+		uint32_t sli2FwRev;
+		uint8_t sli2FwName[16];
+	} rev;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint32_t rsvd3  :19;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
+		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
+		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
+		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
+		uint32_t chbs   : 1;  /* Configure Host Backing store         */
+		uint32_t cinb   : 1;  /* Enable Interrupt Notification Block  */
+		uint32_t cerbm	: 1;  /* Configure Enhanced Receive Buf Mgmt  */
+		uint32_t cmx	: 1;  /* Configure Max XRIs                   */
+		uint32_t cmr	: 1;  /* Configure Max RPIs                   */
+#else	/*  __LITTLE_ENDIAN */
+		uint32_t cmr	: 1;  /* Configure Max RPIs                   */
+		uint32_t cmx	: 1;  /* Configure Max XRIs                   */
+		uint32_t cerbm	: 1;  /* Configure Enhanced Receive Buf Mgmt  */
+		uint32_t cinb   : 1;  /* Enable Interrupt Notification Block  */
+		uint32_t chbs   : 1;  /* Configure Host Backing store         */
+		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
+		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
+		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd3  :19;  /* Reserved                             */
+#endif
+	} sli3Feat;
+} lpfc_vpd_t;
+
+struct lpfc_scsi_buf;
+
+
+/*
+ * lpfc stat counters
+ */
+struct lpfc_stats {
+	/* Statistics for ELS commands */
+	uint32_t elsLogiCol;
+	uint32_t elsRetryExceeded;
+	uint32_t elsXmitRetry;
+	uint32_t elsDelayRetry;
+	uint32_t elsRcvDrop;
+	uint32_t elsRcvFrame;
+	uint32_t elsRcvRSCN;
+	uint32_t elsRcvRNID;
+	uint32_t elsRcvFARP;
+	uint32_t elsRcvFARPR;
+	uint32_t elsRcvFLOGI;
+	uint32_t elsRcvPLOGI;
+	uint32_t elsRcvADISC;
+	uint32_t elsRcvPDISC;
+	uint32_t elsRcvFAN;
+	uint32_t elsRcvLOGO;
+	uint32_t elsRcvPRLO;
+	uint32_t elsRcvPRLI;
+	uint32_t elsRcvLIRR;
+	uint32_t elsRcvRLS;
+	uint32_t elsRcvRPS;
+	uint32_t elsRcvRPL;
+	uint32_t elsRcvRRQ;
+	uint32_t elsRcvRTV;
+	uint32_t elsRcvECHO;
+	uint32_t elsRcvLCB;
+	uint32_t elsRcvRDP;
+	uint32_t elsXmitFLOGI;
+	uint32_t elsXmitFDISC;
+	uint32_t elsXmitPLOGI;
+	uint32_t elsXmitPRLI;
+	uint32_t elsXmitADISC;
+	uint32_t elsXmitLOGO;
+	uint32_t elsXmitSCR;
+	uint32_t elsXmitRNID;
+	uint32_t elsXmitFARP;
+	uint32_t elsXmitFARPR;
+	uint32_t elsXmitACC;
+	uint32_t elsXmitLSRJT;
+
+	uint32_t frameRcvBcast;
+	uint32_t frameRcvMulti;
+	uint32_t strayXmitCmpl;
+	uint32_t frameXmitDelay;
+	uint32_t xriCmdCmpl;
+	uint32_t xriStatErr;
+	uint32_t LinkUp;
+	uint32_t LinkDown;
+	uint32_t LinkMultiEvent;
+	uint32_t NoRcvBuf;
+	uint32_t fcpCmd;
+	uint32_t fcpCmpl;
+	uint32_t fcpRspErr;
+	uint32_t fcpRemoteStop;
+	uint32_t fcpPortRjt;
+	uint32_t fcpPortBusy;
+	uint32_t fcpError;
+	uint32_t fcpLocalErr;
+};
+
+struct lpfc_hba;
+
+
+enum discovery_state {
+	LPFC_VPORT_UNKNOWN     =  0,    /* vport state is unknown */
+	LPFC_VPORT_FAILED      =  1,    /* vport has failed */
+	LPFC_LOCAL_CFG_LINK    =  6,    /* local NPORT Id configured */
+	LPFC_FLOGI             =  7,    /* FLOGI sent to Fabric */
+	LPFC_FDISC             =  8,    /* FDISC sent for vport */
+	LPFC_FABRIC_CFG_LINK   =  9,    /* Fabric assigned NPORT Id
+				         * configured */
+	LPFC_NS_REG            =  10,   /* Register with NameServer */
+	LPFC_NS_QRY            =  11,   /* Query NameServer for NPort ID list */
+	LPFC_BUILD_DISC_LIST   =  12,   /* Build ADISC and PLOGI lists for
+				         * device authentication / discovery */
+	LPFC_DISC_AUTH         =  13,   /* Processing ADISC list */
+	LPFC_VPORT_READY       =  32,
+};
+
+enum hba_state {
+	LPFC_LINK_UNKNOWN    =   0,   /* HBA state is unknown */
+	LPFC_WARM_START      =   1,   /* HBA state after selective reset */
+	LPFC_INIT_START      =   2,   /* Initial state after board reset */
+	LPFC_INIT_MBX_CMDS   =   3,   /* Initialize HBA with mbox commands */
+	LPFC_LINK_DOWN       =   4,   /* HBA initialized, link is down */
+	LPFC_LINK_UP         =   5,   /* Link is up  - issue READ_LA */
+	LPFC_CLEAR_LA        =   6,   /* authentication cmplt - issue
+				       * CLEAR_LA */
+	LPFC_HBA_READY       =  32,
+	LPFC_HBA_ERROR       =  -1
+};
+
+struct lpfc_vport {
+	struct lpfc_hba *phba;
+	struct list_head listentry;
+	uint8_t port_type;
+#define LPFC_PHYSICAL_PORT 1
+#define LPFC_NPIV_PORT  2
+#define LPFC_FABRIC_PORT 3
+	enum discovery_state port_state;
+
+	uint16_t vpi;
+	uint16_t vfi;
+	uint8_t vpi_state;
+#define LPFC_VPI_REGISTERED	0x1
+
+	uint32_t fc_flag;	/* FC flags */
+/* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
+ */
+#define FC_PT2PT                0x1	 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI          0x2	 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO             0x4	 /* Discovery timer running */
+#define FC_PUBLIC_LOOP          0x8	 /* Public loop */
+#define FC_LBIT                 0x10	 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE            0x20	 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE             0x40	 /* More node to process in node tbl */
+#define FC_OFFLINE_MODE         0x80	 /* Interface is offline for diag */
+#define FC_FABRIC               0x100	 /* We are fabric attached */
+#define FC_VPORT_LOGO_RCVD      0x200    /* LOGO received on vport */
+#define FC_RSCN_DISCOVERY       0x400	 /* Auth all devices after RSCN */
+#define FC_LOGO_RCVD_DID_CHNG   0x800    /* FDISC on phys port detect DID chng*/
+#define FC_SCSI_SCAN_TMO        0x4000	 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY      0x8000	 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE         0x10000	 /* NPort discovery active */
+#define FC_BYPASSED_MODE        0x20000	 /* NPort is in bypassed mode */
+#define FC_VPORT_NEEDS_REG_VPI	0x80000  /* Needs to have its vpi registered */
+#define FC_RSCN_DEFERRED	0x100000 /* A deferred RSCN being processed */
+#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
+#define FC_VPORT_CVL_RCVD	0x400000 /* VLink failed due to CVL	 */
+#define FC_VFI_REGISTERED	0x800000 /* VFI is registered */
+#define FC_FDISC_COMPLETED	0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED		0x2000000/* Delay NPort discovery */
+
+	uint32_t ct_flags;
+#define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
+#define FC_CT_RNN_ID		0x2	 /* RNN_ID accepted by switch */
+#define FC_CT_RSNN_NN		0x4	 /* RSNN_NN accepted by switch */
+#define FC_CT_RSPN_ID		0x8	 /* RSPN_ID accepted by switch */
+#define FC_CT_RFT_ID		0x10	 /* RFT_ID accepted by switch */
+
+	struct list_head fc_nodes;
+
+	/* Keep counters for the number of entries in each list. */
+	uint16_t fc_plogi_cnt;
+	uint16_t fc_adisc_cnt;
+	uint16_t fc_reglogin_cnt;
+	uint16_t fc_prli_cnt;
+	uint16_t fc_unmap_cnt;
+	uint16_t fc_map_cnt;
+	uint16_t fc_npr_cnt;
+	uint16_t fc_unused_cnt;
+	struct serv_parm fc_sparam;	/* buffer for our service parameters */
+
+	uint32_t fc_myDID;	/* fibre channel S_ID */
+	uint32_t fc_prevDID;	/* previous fibre channel S_ID */
+	struct lpfc_name fabric_portname;
+	struct lpfc_name fabric_nodename;
+
+	int32_t stopped;   /* HBA has not been restarted since last ERATT */
+	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
+
+	uint32_t num_disc_nodes;	/* in addition to hba_state */
+	uint32_t gidft_inp;		/* cnt of outstanding GID_FTs */
+
+	uint32_t fc_nlp_cnt;	/* outstanding NODELIST requests */
+	uint32_t fc_rscn_id_cnt;	/* count of RSCNs payloads in list */
+	uint32_t fc_rscn_flush;		/* flag use of fc_rscn_id_list */
+	struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+	struct lpfc_name fc_nodename;	/* fc nodename */
+	struct lpfc_name fc_portname;	/* fc portname */
+
+	struct lpfc_work_evt disc_timeout_evt;
+
+	struct timer_list fc_disctmo;	/* Discovery rescue timer */
+	uint8_t fc_ns_retry;	/* retries for fabric nameserver */
+	uint32_t fc_prli_sent;	/* cntr for outstanding PRLIs */
+
+	spinlock_t work_port_lock;
+	uint32_t work_port_events; /* Timeout to be handled  */
+#define WORKER_DISC_TMO                0x1	/* vport: Discovery timeout */
+#define WORKER_ELS_TMO                 0x2	/* vport: ELS timeout */
+#define WORKER_DELAYED_DISC_TMO        0x8	/* vport: delayed discovery */
+
+#define WORKER_MBOX_TMO                0x100	/* hba: MBOX timeout */
+#define WORKER_HB_TMO                  0x200	/* hba: Heart beat timeout */
+#define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
+#define WORKER_RAMP_DOWN_QUEUE         0x800	/* hba: Decrease Q depth */
+#define WORKER_RAMP_UP_QUEUE           0x1000	/* hba: Increase Q depth */
+#define WORKER_SERVICE_TXQ             0x2000	/* hba: IOCBs on the txq */
+
+	struct timer_list els_tmofunc;
+	struct timer_list delayed_disc_tmo;
+
+	int unreg_vpi_cmpl;
+
+	uint8_t load_flag;
+#define FC_LOADING		0x1	/* HBA in process of loading drvr */
+#define FC_UNLOADING		0x2	/* HBA in process of unloading drvr */
+#define FC_ALLOW_FDMI		0x4	/* port is ready for FDMI requests */
+	/* Vport Config Parameters */
+	uint32_t cfg_scan_down;
+	uint32_t cfg_lun_queue_depth;
+	uint32_t cfg_nodev_tmo;
+	uint32_t cfg_devloss_tmo;
+	uint32_t cfg_restrict_login;
+	uint32_t cfg_peer_port_login;
+	uint32_t cfg_fcp_class;
+	uint32_t cfg_use_adisc;
+	uint32_t cfg_discovery_threads;
+	uint32_t cfg_log_verbose;
+	uint32_t cfg_max_luns;
+	uint32_t cfg_enable_da_id;
+	uint32_t cfg_max_scsicmpl_time;
+	uint32_t cfg_tgt_queue_depth;
+	uint32_t cfg_first_burst_size;
+	uint32_t dev_loss_tmo_changed;
+
+	struct fc_vport *fc_vport;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct dentry *debug_disc_trc;
+	struct dentry *debug_nodelist;
+	struct dentry *debug_nvmestat;
+	struct dentry *debug_nvmektime;
+	struct dentry *debug_cpucheck;
+	struct dentry *vport_debugfs_root;
+	struct lpfc_debugfs_trc *disc_trc;
+	atomic_t disc_trc_cnt;
+#endif
+	uint8_t stat_data_enabled;
+	uint8_t stat_data_blocked;
+	struct list_head rcv_buffer_list;
+	unsigned long rcv_buffer_time_stamp;
+	uint32_t vport_flag;
+#define STATIC_VPORT	1
+#define FAWWPN_SET	2
+#define FAWWPN_PARAM_CHG	4
+
+	uint16_t fdmi_num_disc;
+	uint32_t fdmi_hba_mask;
+	uint32_t fdmi_port_mask;
+
+	/* There is a single nvme instance per vport. */
+	struct nvme_fc_local_port *localport;
+	uint8_t  nvmei_support; /* driver supports NVME Initiator */
+	uint32_t last_fcp_wqidx;
+};
+
+struct hbq_s {
+	uint16_t entry_count;	  /* Current number of HBQ slots */
+	uint16_t buffer_count;	  /* Current number of buffers posted */
+	uint32_t next_hbqPutIdx;  /* Index to next HBQ slot to use */
+	uint32_t hbqPutIdx;	  /* HBQ slot to use */
+	uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
+	void    *hbq_virt;	  /* Virtual ptr to this hbq */
+	struct list_head hbq_buffer_list;  /* buffers assigned to this HBQ */
+				  /* Callback for HBQ buffer allocation */
+	struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
+				  /* Callback for HBQ buffer free */
+	void               (*hbq_free_buffer) (struct lpfc_hba *,
+					       struct hbq_dmabuf *);
+};
+
+/* this matches the position in the lpfc_hbq_defs array */
+#define LPFC_ELS_HBQ	0
+#define LPFC_MAX_HBQS	1
+
+enum hba_temp_state {
+	HBA_NORMAL_TEMP,
+	HBA_OVER_TEMP
+};
+
+enum intr_type_t {
+	NONE = 0,
+	INTx,
+	MSI,
+	MSIX,
+};
+
+#define LPFC_CT_CTX_MAX		64
+struct unsol_rcv_ct_ctx {
+	uint32_t ctxt_id;
+	uint32_t SID;
+	uint32_t valid;
+#define UNSOL_INVALID		0
+#define UNSOL_VALID		1
+	uint16_t oxid;
+	uint16_t rxid;
+};
+
+#define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
+#define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
+#define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
+#define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
+#define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
+#define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
+#define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
+#define LPFC_USER_LINK_SPEED_32G	32	/* 32 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_32G
+#define LPFC_USER_LINK_SPEED_BITMAP  ((1ULL << LPFC_USER_LINK_SPEED_32G) | \
+				     (1 << LPFC_USER_LINK_SPEED_16G) | \
+				     (1 << LPFC_USER_LINK_SPEED_10G) | \
+				     (1 << LPFC_USER_LINK_SPEED_8G) | \
+				     (1 << LPFC_USER_LINK_SPEED_4G) | \
+				     (1 << LPFC_USER_LINK_SPEED_2G) | \
+				     (1 << LPFC_USER_LINK_SPEED_1G) | \
+				     (1 << LPFC_USER_LINK_SPEED_AUTO))
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32"
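+
+/*
+ * Usage sketch (hypothetical helper, not part of the driver): a
+ * user-requested speed is valid when its own numeric value indexes a
+ * set bit in the bitmap above; the 32G entry needs the 1ULL literal
+ * because (1 << 32) would overflow a 32-bit int.
+ *
+ *	static bool lpfc_user_speed_valid(uint32_t speed)
+ *	{
+ *		return speed <= LPFC_USER_LINK_SPEED_MAX &&
+ *		       (LPFC_USER_LINK_SPEED_BITMAP & (1ULL << speed));
+ *	}
+ */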
+
+enum nemb_type {
+	nemb_mse = 1,
+	nemb_hbd
+};
+
+enum mbox_type {
+	mbox_rd = 1,
+	mbox_wr
+};
+
+enum dma_type {
+	dma_mbox = 1,
+	dma_ebuf
+};
+
+enum sta_type {
+	sta_pre_addr = 1,
+	sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+	uint32_t state;
+#define LPFC_BSG_MBOX_IDLE		0
+#define LPFC_BSG_MBOX_HOST              1
+#define LPFC_BSG_MBOX_PORT		2
+#define LPFC_BSG_MBOX_DONE		3
+#define LPFC_BSG_MBOX_ABTS		4
+	enum nemb_type nembType;
+	enum mbox_type mboxType;
+	uint32_t numBuf;
+	uint32_t mbxTag;
+	uint32_t seqNum;
+	struct lpfc_dmabuf *mbx_dmabuf;
+	struct list_head ext_dmabuf_list;
+};
+
+struct lpfc_hba {
+	/* SCSI interface function jump table entries */
+	int (*lpfc_new_scsi_buf)
+		(struct lpfc_vport *, int);
+	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_nodelist *);
+	int (*lpfc_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_scsi_unprep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_release_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_rampdown_queue_depth)
+		(struct lpfc_hba *);
+	void (*lpfc_scsi_prep_cmnd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 struct lpfc_nodelist *);
+
+	/* IOCB interface function jump table entries */
+	int (*__lpfc_sli_issue_iocb)
+		(struct lpfc_hba *, uint32_t,
+		 struct lpfc_iocbq *, uint32_t);
+	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+			 struct lpfc_iocbq *);
+	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+	IOCB_t * (*lpfc_get_iocb_from_iocbq)
+		(struct lpfc_iocbq *);
+	void (*lpfc_scsi_cmd_iocb_cmpl)
+		(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+	/* MBOX interface function jump table entries */
+	int (*lpfc_sli_issue_mbox)
+		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+	/* Slow-path IOCB process function jump table entries */
+	void (*lpfc_sli_handle_slow_ring_event)
+		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		 uint32_t mask);
+
+	/* INIT device interface function jump table entries */
+	int (*lpfc_sli_hbq_to_firmware)
+		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+	int (*lpfc_sli_brdrestart)
+		(struct lpfc_hba *);
+	int (*lpfc_sli_brdready)
+		(struct lpfc_hba *, uint32_t);
+	void (*lpfc_handle_eratt)
+		(struct lpfc_hba *);
+	void (*lpfc_stop_port)
+		(struct lpfc_hba *);
+	int (*lpfc_hba_init_link)
+		(struct lpfc_hba *, uint32_t);
+	int (*lpfc_hba_down_link)
+		(struct lpfc_hba *, uint32_t);
+	int (*lpfc_selective_reset)
+		(struct lpfc_hba *);
+
+	int (*lpfc_bg_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	/* Add new entries here */
+
+	/* SLI4 specific HBA data structure */
+	struct lpfc_sli4_hba sli4_hba;
+
+	struct lpfc_sli sli;
+	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
+	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
+#define LPFC_SLI3_HBQ_ENABLED		0x01
+#define LPFC_SLI3_NPIV_ENABLED		0x02
+#define LPFC_SLI3_VPORT_TEARDOWN	0x04
+#define LPFC_SLI3_CRP_ENABLED		0x08
+#define LPFC_SLI3_BG_ENABLED		0x20
+#define LPFC_SLI3_DSS_ENABLED		0x40
+#define LPFC_SLI4_PERFH_ENABLED		0x80
+#define LPFC_SLI4_PHWQ_ENABLED		0x100
+	uint32_t iocb_cmd_size;
+	uint32_t iocb_rsp_size;
+
+	enum hba_state link_state;
+	uint32_t link_flag;	/* link state flags */
+#define LS_LOOPBACK_MODE      0x1	/* NPort is in Loopback mode */
+					/* This flag is set while issuing */
+					/* INIT_LINK mailbox command */
+#define LS_NPIV_FAB_SUPPORTED 0x2	/* Fabric supports NPIV */
+#define LS_IGNORE_ERATT       0x4	/* intr handler should ignore ERATT */
+#define LS_MDS_LINK_DOWN      0x8	/* MDS Diagnostics Link Down */
+#define LS_MDS_LOOPBACK      0x10	/* MDS Diagnostics Link Up (Loopback) */
+
+	uint32_t hba_flag;	/* hba generic flags */
+#define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
+#define DEFER_ERATT		0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_MODE		0x4 /* HBA function in FCoE Mode */
+#define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT	0x20
+#define ELS_XRI_ABORT_EVENT	0x40
+#define ASYNC_EVENT		0x80
+#define LINK_DISABLED		0x100 /* Link disabled by user */
+#define FCF_TS_INPROG           0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG           0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT		0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
+#define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
+#define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
+#define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */
+#define HBA_RECOVERABLE_UE	0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED	0x40000 /*
+					 * Firmware supports Forced Link Speed
+					 * capability
+					 */
+#define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT	0x100000
+
+	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
+	struct lpfc_dmabuf slim2p;
+
+	MAILBOX_t *mbox;
+	uint32_t *mbox_ext;
+	struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
+	uint32_t ha_copy;
+	struct _PCB *pcb;
+	struct _IOCB *IOCBs;
+
+	struct lpfc_dmabuf hbqslimp;
+
+	uint16_t pci_cfg_value;
+
+	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
+
+	uint32_t fc_eventTag;	/* event tag for link attention */
+	uint32_t link_events;
+
+	/* These fields used to be binfo */
+	uint32_t fc_pref_DID;	/* preferred D_ID */
+	uint8_t  fc_pref_ALPA;	/* preferred AL_PA */
+	uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
+	uint32_t fc_edtov;	/* E_D_TOV timer value */
+	uint32_t fc_arbtov;	/* ARB_TOV timer value */
+	uint32_t fc_ratov;	/* R_A_TOV timer value */
+	uint32_t fc_rttov;	/* R_T_TOV timer value */
+	uint32_t fc_altov;	/* AL_TOV timer value */
+	uint32_t fc_crtov;	/* C_R_TOV timer value */
+
+	struct serv_parm fc_fabparam;	/* fabric service parameters buffer */
+	uint8_t alpa_map[128];	/* AL_PA map from READ_LA */
+
+	uint32_t lmt;
+
+	uint32_t fc_topology;	/* link topology, from LINK INIT */
+	uint32_t fc_topology_changed;	/* link topology changed since LINK INIT */
+
+	struct lpfc_stats fc_stat;
+
+	struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
+	uint32_t nport_event_cnt;	/* timestamp for nlplist entry */
+
+	uint8_t  wwnn[8];
+	uint8_t  wwpn[8];
+	uint32_t RandomData[7];
+	uint8_t  fcp_embed_io;
+	uint8_t  nvme_support;	/* Firmware supports NVME */
+	uint8_t  nvmet_support;	/* driver supports NVMET */
+#define LPFC_NVMET_MAX_PORTS	32
+	uint8_t  mds_diags_support;
+	uint32_t initial_imax;
+	uint8_t  bbcredit_support;
+
+	/* HBA Config Parameters */
+	uint32_t cfg_ack0;
+	uint32_t cfg_enable_npiv;
+	uint32_t cfg_enable_rrq;
+	uint32_t cfg_topology;
+	uint32_t cfg_link_speed;
+#define LPFC_FCF_FOV 1		/* Fast fcf failover */
+#define LPFC_FCF_PRIORITY 2	/* Priority fcf failover */
+	uint32_t cfg_fcf_failover_policy;
+	uint32_t cfg_fcp_io_sched;
+	uint32_t cfg_fcp2_no_tgt_reset;
+	uint32_t cfg_cr_delay;
+	uint32_t cfg_cr_count;
+	uint32_t cfg_multi_ring_support;
+	uint32_t cfg_multi_ring_rctl;
+	uint32_t cfg_multi_ring_type;
+	uint32_t cfg_poll;
+	uint32_t cfg_poll_tmo;
+	uint32_t cfg_task_mgmt_tmo;
+	uint32_t cfg_use_msi;
+	uint32_t cfg_auto_imax;
+	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_cpu_map;
+	uint32_t cfg_fcp_io_channel;
+	uint32_t cfg_suppress_rsp;
+	uint32_t cfg_nvme_oas;
+	uint32_t cfg_nvme_io_channel;
+	uint32_t cfg_nvmet_mrq;
+	uint32_t cfg_enable_nvmet;
+	uint32_t cfg_nvme_enable_fb;
+	uint32_t cfg_nvmet_fb_size;
+	uint32_t cfg_total_seg_cnt;
+	uint32_t cfg_sg_seg_cnt;
+	uint32_t cfg_nvme_seg_cnt;
+	uint32_t cfg_sg_dma_buf_size;
+	uint64_t cfg_soft_wwnn;
+	uint64_t cfg_soft_wwpn;
+	uint32_t cfg_hba_queue_depth;
+	uint32_t cfg_enable_hba_reset;
+	uint32_t cfg_enable_hba_heartbeat;
+	uint32_t cfg_fof;
+	uint32_t cfg_EnableXLane;
+	uint8_t cfg_oas_tgt_wwpn[8];
+	uint8_t cfg_oas_vpt_wwpn[8];
+	uint32_t cfg_oas_lun_state;
+#define OAS_LUN_ENABLE	1
+#define OAS_LUN_DISABLE	0
+	uint32_t cfg_oas_lun_status;
+#define OAS_LUN_STATUS_EXISTS	0x01
+	uint32_t cfg_oas_flags;
+#define OAS_FIND_ANY_VPORT	0x01
+#define OAS_FIND_ANY_TARGET	0x02
+#define OAS_LUN_VALID	0x04
+	uint32_t cfg_oas_priority;
+	uint32_t cfg_XLanePriority;
+	uint32_t cfg_enable_bg;
+	uint32_t cfg_prot_mask;
+	uint32_t cfg_prot_guard;
+	uint32_t cfg_hostmem_hgp;
+	uint32_t cfg_log_verbose;
+	uint32_t cfg_aer_support;
+	uint32_t cfg_sriov_nr_virtfn;
+	uint32_t cfg_request_firmware_upgrade;
+	uint32_t cfg_iocb_cnt;
+	uint32_t cfg_suppress_link_up;
+	uint32_t cfg_rrq_xri_bitmap_sz;
+	uint32_t cfg_delay_discovery;
+	uint32_t cfg_sli_mode;
+#define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
+	uint32_t cfg_enable_dss;
+	uint32_t cfg_fdmi_on;
+#define LPFC_FDMI_NO_SUPPORT	0	/* FDMI not supported */
+#define LPFC_FDMI_SUPPORT	1	/* FDMI supported? */
+	uint32_t cfg_enable_SmartSAN;
+	uint32_t cfg_enable_mds_diags;
+	uint32_t cfg_enable_fc4_type;
+	uint32_t cfg_enable_bbcr;	/*Enable BB Credit Recovery*/
+	uint32_t cfg_xri_split;
+#define LPFC_ENABLE_FCP  1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+	uint32_t io_channel_irqs;	/* number of irqs for io channels */
+	struct nvmet_fc_target_port *targetport;
+	lpfc_vpd_t vpd;		/* vital product data */
+
+	struct pci_dev *pcidev;
+	struct list_head      work_list;
+	uint32_t              work_ha;      /* Host Attention Bits for WT */
+	uint32_t              work_ha_mask; /* HA Bits owned by WT        */
+	uint32_t              work_hs;      /* HS stored in case of ERRAT */
+	uint32_t              work_status[2]; /* Extra status from SLIM */
+
+	wait_queue_head_t    work_waitq;
+	struct task_struct   *worker_thread;
+	unsigned long data_flags;
+
+	uint32_t hbq_in_use;		/* HBQs in use flag */
+	uint32_t hbq_count;	        /* Count of configured HBQs */
+	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices  */
+
+	atomic_t fcp_qidx;         /* next FCP WQ (RR Policy) */
+	atomic_t nvme_qidx;        /* next NVME WQ (RR Policy) */
+
+	phys_addr_t pci_bar0_map;     /* Physical address for PCI BAR0 */
+	phys_addr_t pci_bar1_map;     /* Physical address for PCI BAR1 */
+	phys_addr_t pci_bar2_map;     /* Physical address for PCI BAR2 */
+	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
+					   PCI BAR0 */
+	void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
+					    PCI BAR2 */
+
+	void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+					    PCI BAR0 with dual-ULP support */
+	void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+					    PCI BAR2 with dual-ULP support */
+	void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+					    PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0	0
+#define PCI_64BIT_BAR2	2
+#define PCI_64BIT_BAR4	4
+	void __iomem *MBslimaddr;	/* virtual address for mbox cmds */
+	void __iomem *HAregaddr;	/* virtual address for host attn reg */
+	void __iomem *CAregaddr;	/* virtual address for chip attn reg */
+	void __iomem *HSregaddr;	/* virtual address for host status
+					   reg */
+	void __iomem *HCregaddr;	/* virtual address for host ctl reg */
+
+	struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+	struct lpfc_pgp   *port_gp;
+	uint32_t __iomem  *hbq_put;     /* Address in SLIM to HBQ put ptrs */
+	uint32_t          *hbq_get;     /* Host mem address of HBQ get ptrs */
+
+	int brd_no;			/* FC board number */
+	char SerialNumber[32];		/* adapter Serial Number */
+	char OptionROMVersion[32];	/* adapter BIOS / Fcode version */
+	char ModelDesc[256];		/* Model Description */
+	char ModelName[80];		/* Model Name */
+	char ProgramType[256];		/* Program Type */
+	char Port[20];			/* Port No */
+	uint8_t vpd_flag;               /* VPD data flag */
+
+#define VPD_MODEL_DESC      0x1         /* valid vpd model description */
+#define VPD_MODEL_NAME      0x2         /* valid vpd model name */
+#define VPD_PROGRAM_TYPE    0x4         /* valid vpd program type */
+#define VPD_PORT            0x8         /* valid vpd port data */
+#define VPD_MASK            0xf         /* mask for any vpd data */
+
+	uint8_t soft_wwn_enable;
+
+	struct timer_list fcp_poll_timer;
+	struct timer_list eratt_poll;
+	uint32_t eratt_poll_interval;
+
+	/*
+	 * stat  counters
+	 */
+	atomic_t fc4ScsiInputRequests;
+	atomic_t fc4ScsiOutputRequests;
+	atomic_t fc4ScsiControlRequests;
+	atomic_t fc4ScsiIoCmpls;
+	atomic_t fc4NvmeInputRequests;
+	atomic_t fc4NvmeOutputRequests;
+	atomic_t fc4NvmeControlRequests;
+	atomic_t fc4NvmeIoCmpls;
+	atomic_t fc4NvmeLsRequests;
+	atomic_t fc4NvmeLsCmpls;
+
+	uint64_t bg_guard_err_cnt;
+	uint64_t bg_apptag_err_cnt;
+	uint64_t bg_reftag_err_cnt;
+
+	/* fastpath list. */
+	spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+	spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+	struct list_head lpfc_scsi_buf_list_get;
+	struct list_head lpfc_scsi_buf_list_put;
+	uint32_t total_scsi_bufs;
+	spinlock_t nvme_buf_list_get_lock;  /* NVME buf alloc list lock */
+	spinlock_t nvme_buf_list_put_lock;  /* NVME buf free list lock */
+	struct list_head lpfc_nvme_buf_list_get;
+	struct list_head lpfc_nvme_buf_list_put;
+	uint32_t total_nvme_bufs;
+	struct list_head lpfc_iocb_list;
+	uint32_t total_iocbq_bufs;
+	struct list_head active_rrq_list;
+	spinlock_t hbalock;
+
+	/* dma_mem_pools */
+	struct dma_pool *lpfc_sg_dma_buf_pool;
+	struct dma_pool *lpfc_mbuf_pool;
+	struct dma_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
+	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
+	struct dma_pool *txrdy_payload_pool;
+	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
+
+	mempool_t *mbox_mem_pool;
+	mempool_t *nlp_mem_pool;
+	mempool_t *rrq_pool;
+	mempool_t *active_rrq_pool;
+
+	struct fc_host_statistics link_stats;
+	enum intr_type_t intr_type;
+	uint32_t intr_mode;
+#define LPFC_INTR_ERROR	0xFFFFFFFF
+	struct list_head port_list;
+	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
+	uint16_t max_vpi;		/* Maximum virtual nports */
+#define LPFC_MAX_VPI	0xFF		/* Max number VPI supported 0 - 0xff */
+#define LPFC_MAX_VPORTS	0x100		/* Max vports per port, with pport */
+	uint16_t max_vports;            /*
+					 * For IOV HBAs max_vpi can change
+					 * after a reset. max_vports is max
+					 * number of vports present. This can
+					 * be greater than max_vpi.
+					 */
+	uint16_t vpi_base;
+	uint16_t vfi_base;
+	unsigned long *vpi_bmask;	/* vpi allocation table */
+	uint16_t *vpi_ids;
+	uint16_t vpi_count;
+	struct list_head lpfc_vpi_blk_list;
+
+	/* Data structure used by fabric iocb scheduler */
+	struct list_head fabric_iocb_list;
+	atomic_t fabric_iocb_count;
+	struct timer_list fabric_block_timer;
+	unsigned long bit_flags;
+#define	FABRIC_COMANDS_BLOCKED	0
+	atomic_t num_rsrc_err;
+	atomic_t num_cmd_success;
+	unsigned long last_rsrc_error_time;
+	unsigned long last_ramp_down_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct dentry *hba_debugfs_root;
+	atomic_t debugfs_vport_count;
+	struct dentry *debug_hbqinfo;
+	struct dentry *debug_dumpHostSlim;
+	struct dentry *debug_dumpHBASlim;
+	struct dentry *debug_dumpData;   /* BlockGuard BPL */
+	struct dentry *debug_dumpDif;    /* BlockGuard BPL */
+	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
+	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
+	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
+	struct dentry *debug_writeGuard; /* inject write guard_tag errors */
+	struct dentry *debug_writeApp;   /* inject write app_tag errors */
+	struct dentry *debug_writeRef;   /* inject write ref_tag errors */
+	struct dentry *debug_readGuard;  /* inject read guard_tag errors */
+	struct dentry *debug_readApp;    /* inject read app_tag errors */
+	struct dentry *debug_readRef;    /* inject read ref_tag errors */
+
+	struct dentry *debug_nvmeio_trc;
+	struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
+	atomic_t nvmeio_trc_cnt;
+	uint32_t nvmeio_trc_size;
+	uint32_t nvmeio_trc_output_idx;
+
+	/* T10 DIF error injection */
+	uint32_t lpfc_injerr_wgrd_cnt;
+	uint32_t lpfc_injerr_wapp_cnt;
+	uint32_t lpfc_injerr_wref_cnt;
+	uint32_t lpfc_injerr_rgrd_cnt;
+	uint32_t lpfc_injerr_rapp_cnt;
+	uint32_t lpfc_injerr_rref_cnt;
+	uint32_t lpfc_injerr_nportid;
+	struct lpfc_name lpfc_injerr_wwpn;
+	sector_t lpfc_injerr_lba;
+#define LPFC_INJERR_LBA_OFF	(sector_t)(-1)
+
+	struct dentry *debug_slow_ring_trc;
+	struct lpfc_debugfs_trc *slow_ring_trc;
+	atomic_t slow_ring_trc_cnt;
+	/* iDiag debugfs sub-directory */
+	struct dentry *idiag_root;
+	struct dentry *idiag_pci_cfg;
+	struct dentry *idiag_bar_acc;
+	struct dentry *idiag_que_info;
+	struct dentry *idiag_que_acc;
+	struct dentry *idiag_drb_acc;
+	struct dentry *idiag_ctl_acc;
+	struct dentry *idiag_mbx_acc;
+	struct dentry *idiag_ext_acc;
+	uint8_t lpfc_idiag_last_eq;
+#endif
+	uint16_t nvmeio_trc_on;
+
+	/* Used for deferred freeing of ELS data buffers */
+	struct list_head elsbuf;
+	int elsbuf_cnt;
+	int elsbuf_prev_cnt;
+
+	uint8_t temp_sensor_support;
+	/* Fields used for heart beat. */
+	unsigned long last_eqdelay_time;
+	unsigned long last_completion_time;
+	unsigned long skipped_hb;
+	struct timer_list hb_tmofunc;
+	uint8_t hb_outstanding;
+	struct timer_list rrq_tmr;
+	enum hba_temp_state over_temp_state;
+	/* ndlp reference management */
+	spinlock_t ndlp_lock;
+	/*
+	 * Following bit will be set for all buffer tags which are not
+	 * associated with any HBQ.
+	 */
+#define QUE_BUFTAG_BIT  (1<<31)
+	uint32_t buffer_tag_count;
+	int wait_4_mlo_maint_flg;
+	wait_queue_head_t wait_4_mlo_m_q;
+	/* data structure used for latency data collection */
+#define LPFC_NO_BUCKET	   0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+	uint8_t  bucket_type;
+	uint32_t bucket_base;
+	uint32_t bucket_step;
+
+/* Maximum number of events that can be outstanding at any time*/
+#define LPFC_MAX_EVT_COUNT 512
+	atomic_t fast_event_count;
+	uint32_t fcoe_eventtag;
+	uint32_t fcoe_eventtag_at_fcf_scan;
+	uint32_t fcoe_cvl_eventtag;
+	uint32_t fcoe_cvl_eventtag_attn;
+	struct lpfc_fcf fcf;
+	uint8_t fc_map[3];
+	uint8_t valid_vlan;
+	uint16_t vlan_id;
+	struct list_head fcf_conn_rec_list;
+
+	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
+	struct list_head ct_ev_waiters;
+	struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
+	uint32_t ctx_idx;
+
+	uint8_t menlo_flag;	/* menlo generic flags */
+#define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
+	uint32_t iocb_cnt;
+	uint32_t iocb_max;
+	atomic_t sdev_cnt;
+	uint8_t fips_spec_rev;
+	uint8_t fips_level;
+	spinlock_t devicelock;	/* lock for luns list */
+	mempool_t *device_data_mem_pool;
+	struct list_head luns;
+#define LPFC_TRANSGRESSION_HIGH_TEMPERATURE	0x0080
+#define LPFC_TRANSGRESSION_LOW_TEMPERATURE	0x0040
+#define LPFC_TRANSGRESSION_HIGH_VOLTAGE		0x0020
+#define LPFC_TRANSGRESSION_LOW_VOLTAGE		0x0010
+#define LPFC_TRANSGRESSION_HIGH_TXBIAS		0x0008
+#define LPFC_TRANSGRESSION_LOW_TXBIAS		0x0004
+#define LPFC_TRANSGRESSION_HIGH_TXPOWER		0x0002
+#define LPFC_TRANSGRESSION_LOW_TXPOWER		0x0001
+#define LPFC_TRANSGRESSION_HIGH_RXPOWER		0x8000
+#define LPFC_TRANSGRESSION_LOW_RXPOWER		0x4000
+	uint16_t sfp_alarm;
+	uint16_t sfp_warning;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+#define LPFC_CHECK_CPU_CNT    32
+	uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
+	uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
+	uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
+	uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT];
+	uint16_t cpucheck_on;
+#define LPFC_CHECK_OFF		0
+#define LPFC_CHECK_NVME_IO	1
+#define LPFC_CHECK_NVMET_RCV	2
+#define LPFC_CHECK_NVMET_IO	4
+	uint16_t ktime_on;
+	uint64_t ktime_data_samples;
+	uint64_t ktime_status_samples;
+	uint64_t ktime_last_cmd;
+	uint64_t ktime_seg1_total;
+	uint64_t ktime_seg1_min;
+	uint64_t ktime_seg1_max;
+	uint64_t ktime_seg2_total;
+	uint64_t ktime_seg2_min;
+	uint64_t ktime_seg2_max;
+	uint64_t ktime_seg3_total;
+	uint64_t ktime_seg3_min;
+	uint64_t ktime_seg3_max;
+	uint64_t ktime_seg4_total;
+	uint64_t ktime_seg4_min;
+	uint64_t ktime_seg4_max;
+	uint64_t ktime_seg5_total;
+	uint64_t ktime_seg5_min;
+	uint64_t ktime_seg5_max;
+	uint64_t ktime_seg6_total;
+	uint64_t ktime_seg6_min;
+	uint64_t ktime_seg6_max;
+	uint64_t ktime_seg7_total;
+	uint64_t ktime_seg7_min;
+	uint64_t ktime_seg7_max;
+	uint64_t ktime_seg8_total;
+	uint64_t ktime_seg8_min;
+	uint64_t ktime_seg8_max;
+	uint64_t ktime_seg9_total;
+	uint64_t ktime_seg9_min;
+	uint64_t ktime_seg9_max;
+	uint64_t ktime_seg10_total;
+	uint64_t ktime_seg10_min;
+	uint64_t ktime_seg10_max;
+#endif
+};
+
+static inline struct Scsi_Host *
+lpfc_shost_from_vport(struct lpfc_vport *vport)
+{
+	return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
+}
+
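+/*
+ * lpfc_shost_from_vport() is the inverse of shost_priv(): the
+ * lpfc_vport lives in the Scsi_Host private data area.  A sketch,
+ * assuming the host was allocated with the vport as its hostdata:
+ *
+ *	struct lpfc_vport *vport = shost_priv(shost);
+ *	BUG_ON(lpfc_shost_from_vport(vport) != shost);
+ */
+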
+static inline void
+lpfc_set_loopback_flag(struct lpfc_hba *phba)
+{
+	if (phba->cfg_topology == FLAGS_LOCAL_LB)
+		phba->link_flag |= LS_LOOPBACK_MODE;
+	else
+		phba->link_flag &= ~LS_LOOPBACK_MODE;
+}
+
+static inline int
+lpfc_is_link_up(struct lpfc_hba *phba)
+{
+	return  phba->link_state == LPFC_LINK_UP ||
+		phba->link_state == LPFC_CLEAR_LA ||
+		phba->link_state == LPFC_HBA_READY;
+}
+
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
+static inline int
+lpfc_readl(void __iomem *addr, uint32_t *data)
+{
+	uint32_t temp;
+	temp = readl(addr);
+	if (temp == 0xffffffff)
+		return -EIO;
+	*data = temp;
+	return 0;
+}
+
+static inline int
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+	/*
+	 * There was a link/board error. Read the status register to retrieve
+	 * the error event and process it.
+	 */
+	phba->sli.slistat.err_attn_event++;
+
+	/* Save status info and check for unplug error */
+	if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
+		lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
+		lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
+		return -EIO;
+	}
+
+	/* Clear chip Host Attention error bit */
+	writel(HA_ERATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	phba->pport->stopped = 1;
+
+	return 0;
+}
+
+static inline struct lpfc_sli_ring *
+lpfc_phba_elsring(struct lpfc_hba *phba)
+{
+	/* Return NULL if sli_rev has become invalid due to bad fw */
+	if (phba->sli_rev != LPFC_SLI_REV4  &&
+	    phba->sli_rev != LPFC_SLI_REV3  &&
+	    phba->sli_rev != LPFC_SLI_REV2)
+		return NULL;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (phba->sli4_hba.els_wq)
+			return phba->sli4_hba.els_wq->pring;
+		else
+			return NULL;
+	}
+	return &phba->sli.sli3_ring[LPFC_ELS_RING];
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_attr.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_attr.c
new file mode 100644
index 0000000..f447355
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_attr.c
@@ -0,0 +1,6416 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/aer.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_attr.h"
+
+#define LPFC_DEF_DEVLOSS_TMO	30
+#define LPFC_MIN_DEVLOSS_TMO	1
+#define LPFC_MAX_DEVLOSS_TMO	255
+
+#define LPFC_DEF_MRQ_POST	512
+#define LPFC_MIN_MRQ_POST	512
+#define LPFC_MAX_MRQ_POST	2048
+
+/*
+ * Write key size should be multiple of 4. If write key is changed
+ * make sure that library write key is also changed.
+ */
+#define LPFC_REG_WRITE_KEY_SIZE	4
+#define LPFC_REG_WRITE_KEY	"EMLX"
+
+/**
+ * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
+ * @incr: integer to convert.
+ * @hdw: ascii string holding converted integer plus a string terminator.
+ *
+ * Description:
+ * JEDEC Joint Electron Device Engineering Council.
+ * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
+ * character string. The string is then terminated with a NULL in byte 9.
+ * Hex 0-9 becomes ascii '0' to '9'.
+ * Hex a-f becomes ascii 'a' to 'f'.
+ *
+ * Notes:
+ * Coded for 32 bit integers only.
+ **/
+static void
+lpfc_jedec_to_ascii(int incr, char hdw[])
+{
+	int i, j;
+	for (i = 0; i < 8; i++) {
+		j = (incr & 0xf);
+		if (j <= 9)
+			hdw[7 - i] = 0x30 + j;
+		else
+			hdw[7 - i] = 0x61 + j - 10;
+		incr = (incr >> 4);
+	}
+	hdw[8] = 0;
+	return;
+}
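+
+/*
+ * Usage sketch (hypothetical value): the loop above fills the string
+ * from the least significant nibble backwards, so
+ *
+ *	char rev[9];
+ *	lpfc_jedec_to_ascii(0x10203040, rev);	-> rev == "10203040"
+ */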
+
+/**
+ * lpfc_drvr_version_show - Return the Emulex driver string with version number
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+}
+
+/**
+ * lpfc_enable_fip_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->hba_flag & HBA_FIP_SUPPORT)
+		return snprintf(buf, PAGE_SIZE, "1\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
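+
+/*
+ * Usage sketch (assumes the attribute is registered as a read-only
+ * scsi_host device attribute named lpfc_enable_fip, like the other
+ * *_show handlers in this file):
+ *
+ *	# cat /sys/class/scsi_host/host0/lpfc_enable_fip
+ *	1
+ */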
+
+static ssize_t
+lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nodelist *ndlp;
+	struct nvme_fc_remote_port *nrport;
+	uint64_t data1, data2, data3, tot;
+	char *statep;
+	int len = 0;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+		len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
+		return len;
+	}
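+	/*
+	 * From here on, output is accumulated with the usual sysfs
+	 * pattern: len += snprintf(buf + len, PAGE_SIZE - len, ...),
+	 * so each append continues where the previous one stopped
+	 * within the single sysfs page.
+	 */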
+	if (phba->nvmet_support) {
+		if (!phba->targetport) {
+			len = snprintf(buf, PAGE_SIZE,
+					"NVME Target: x%llx is not allocated\n",
+					wwn_to_u64(vport->fc_portname.u.wwn));
+			return len;
+		}
+		/* Port state is only one of two values for now. */
+		if (phba->targetport->port_id)
+			statep = "REGISTERED";
+		else
+			statep = "INIT";
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"NVME Target Enabled  State %s\n",
+				statep);
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
+				"NVME Target: lpfc",
+				phba->brd_no,
+				wwn_to_u64(vport->fc_portname.u.wwn),
+				wwn_to_u64(vport->fc_nodename.u.wwn),
+				phba->targetport->port_id);
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"\nNVME Target: Statistics\n");
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+		len += snprintf(buf+len, PAGE_SIZE-len,
+				"LS: Rcv %08x Drop %08x Abort %08x\n",
+				atomic_read(&tgtp->rcv_ls_req_in),
+				atomic_read(&tgtp->rcv_ls_req_drop),
+				atomic_read(&tgtp->xmt_ls_abort));
+		if (atomic_read(&tgtp->rcv_ls_req_in) !=
+		    atomic_read(&tgtp->rcv_ls_req_out)) {
+			len += snprintf(buf+len, PAGE_SIZE-len,
+					"Rcv LS: in %08x != out %08x\n",
+					atomic_read(&tgtp->rcv_ls_req_in),
+					atomic_read(&tgtp->rcv_ls_req_out));
+		}
+
+		len += snprintf(buf+len, PAGE_SIZE-len,
+				"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+				atomic_read(&tgtp->xmt_ls_rsp),
+				atomic_read(&tgtp->xmt_ls_drop),
+				atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+				atomic_read(&tgtp->xmt_ls_rsp_error));
+
+		len += snprintf(buf+len, PAGE_SIZE-len,
+				"FCP: Rcv %08x Defer %08x Release %08x "
+				"Drop %08x\n",
+				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_defer),
+				atomic_read(&tgtp->xmt_fcp_release),
+				atomic_read(&tgtp->rcv_fcp_cmd_drop));
+
+		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
+		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
+			len += snprintf(buf+len, PAGE_SIZE-len,
+					"Rcv FCP: in %08x != out %08x\n",
+					atomic_read(&tgtp->rcv_fcp_cmd_in),
+					atomic_read(&tgtp->rcv_fcp_cmd_out));
+		}
+
+		len += snprintf(buf+len, PAGE_SIZE-len,
+				"FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
+				"drop %08x\n",
+				atomic_read(&tgtp->xmt_fcp_read),
+				atomic_read(&tgtp->xmt_fcp_read_rsp),
+				atomic_read(&tgtp->xmt_fcp_write),
+				atomic_read(&tgtp->xmt_fcp_rsp),
+				atomic_read(&tgtp->xmt_fcp_drop));
+
+		len += snprintf(buf+len, PAGE_SIZE-len,
+				"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+				atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+				atomic_read(&tgtp->xmt_fcp_rsp_error),
+				atomic_read(&tgtp->xmt_fcp_rsp_drop));
+
+		len += snprintf(buf+len, PAGE_SIZE-len,
+				"ABORT: Xmt %08x Cmpl %08x\n",
+				atomic_read(&tgtp->xmt_fcp_abort),
+				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
+				atomic_read(&tgtp->xmt_abort_sol),
+				atomic_read(&tgtp->xmt_abort_unsol),
+				atomic_read(&tgtp->xmt_abort_rsp),
+				atomic_read(&tgtp->xmt_abort_rsp_error));
+
+		/* Calculate outstanding IOs */
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+		tot += atomic_read(&tgtp->xmt_fcp_release);
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
+
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
+				"CTX Outstanding %08llx\n",
+				phba->sli4_hba.nvmet_xri_cnt,
+				phba->sli4_hba.nvmet_io_wait_cnt,
+				phba->sli4_hba.nvmet_io_wait_total,
+				tot);
+
+		len +=  snprintf(buf+len, PAGE_SIZE-len, "\n");
+		return len;
+	}
+
+	localport = vport->localport;
+	if (!localport) {
+		len = snprintf(buf, PAGE_SIZE,
+				"NVME Initiator x%llx is not allocated\n",
+				wwn_to_u64(vport->fc_portname.u.wwn));
+		return len;
+	}
+	len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
+
+	spin_lock_irq(shost->host_lock);
+
+	/* Port state is only one of two values for now. */
+	if (localport->port_id)
+		statep = "ONLINE";
+	else
+		statep = "UNKNOWN ";
+
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
+			"NVME LPORT lpfc",
+			phba->brd_no,
+			wwn_to_u64(vport->fc_portname.u.wwn),
+			wwn_to_u64(vport->fc_nodename.u.wwn),
+			localport->port_id, statep);
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!ndlp->nrport)
+			continue;
+
+		/* local short-hand pointer. */
+		nrport = ndlp->nrport->remoteport;
+
+		/* Port state is only one of two values for now. */
+		switch (nrport->port_state) {
+		case FC_OBJSTATE_ONLINE:
+			statep = "ONLINE";
+			break;
+		case FC_OBJSTATE_UNKNOWN:
+			statep = "UNKNOWN ";
+			break;
+		default:
+			statep = "UNSUPPORTED";
+			break;
+		}
+
+		/* Tab in to show lport ownership. */
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"NVME RPORT       ");
+		if (phba->brd_no >= 10)
+			len += snprintf(buf + len, PAGE_SIZE - len, " ");
+
+		len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
+				nrport->port_name);
+		len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
+				nrport->node_name);
+		len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
+				nrport->port_id);
+
+		/* An NVME rport can have multiple roles. */
+		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"INITIATOR ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"TARGET ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"DISCSRVC ");
+		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+					  FC_PORT_ROLE_NVME_TARGET |
+					  FC_PORT_ROLE_NVME_DISCOVERY))
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"UNKNOWN ROLE x%x",
+					nrport->port_role);
+
+		len += snprintf(buf + len, PAGE_SIZE - len, "%s  ", statep);
+		/* Terminate the string. */
+		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
+	len += snprintf(buf+len, PAGE_SIZE-len,
+			"LS: Xmt %016x Cmpl %016x\n",
+			atomic_read(&phba->fc4NvmeLsRequests),
+			atomic_read(&phba->fc4NvmeLsCmpls));
+
+	tot = atomic_read(&phba->fc4NvmeIoCmpls);
+	data1 = atomic_read(&phba->fc4NvmeInputRequests);
+	data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+	data3 = atomic_read(&phba->fc4NvmeControlRequests);
+	len += snprintf(buf+len, PAGE_SIZE-len,
+			"FCP: Rd %016llx Wr %016llx IO %016llx\n",
+			data1, data2, data3);
+
+	len += snprintf(buf+len, PAGE_SIZE-len,
+			"    Cmpl %016llx Outstanding %016llx\n",
+			tot, (data1 + data2 + data3) - tot);
+	return len;
+}
+
+static ssize_t
+lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->cfg_enable_bg) {
+		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+			return snprintf(buf, PAGE_SIZE,
+					"BlockGuard Enabled\n");
+		else
+			return snprintf(buf, PAGE_SIZE,
+					"BlockGuard Not Supported\n");
+	} else {
+		return snprintf(buf, PAGE_SIZE,
+				"BlockGuard Disabled\n");
+	}
+}
+
+static ssize_t
+lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_guard_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_apptag_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_reftag_err_cnt);
+}
+
+/**
+ * lpfc_info_show - Return some pci info about the host in ascii
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text from lpfc_info().
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_info_show(struct device *dev, struct device_attribute *attr,
+	       char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
+}
+
+/**
+ * lpfc_serialnum_show - Return the hba serial number in ascii
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text serial number.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
+}
+
+/**
+ * lpfc_temp_sensor_show - Return the temperature sensor level
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns a number indicating the temperature sensor level currently
+ * supported, zero or one in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
+}
+
+/**
+ * lpfc_modeldesc_show - Return the model description of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model description.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
+}
+
+/**
+ * lpfc_modelname_show - Return the model name of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model name.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
+}
+
+/**
+ * lpfc_programtype_show - Return the program type of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
+}
+
+/**
+ * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the Menlo Maintenance sli flag.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
+}
+
+/**
+ * lpfc_vportnum_show - Return the port number in ascii of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
+}
+
+/**
+ * lpfc_fwrev_show - Return the firmware rev running in the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t if_type;
+	uint8_t sli_family;
+	char fwrev[FW_REV_STR_SIZE];
+	int len;
+
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	if_type = phba->sli4_hba.pc_sli4_params.if_type;
+	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+			       fwrev, phba->sli_rev);
+	else
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+			       fwrev, phba->sli_rev, if_type, sli_family);
+
+	return len;
+}
+
+/**
+ * lpfc_hdw_show - Return the jedec information about the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	char hdw[9];
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	lpfc_vpd_t *vp = &phba->vpd;
+
+	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
+	return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+}
+
+/**
+ * lpfc_option_rom_version_show - Return the adapter ROM FCode version
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ROM and FCode ascii strings.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	char fwrev[FW_REV_STR_SIZE];
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	return snprintf(buf, PAGE_SIZE, "%s\n", fwrev);
+}
+
+/**
+ * lpfc_link_state_show - Return the link state of the port
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Notes:
+ * The switch statement has no default so zero will be returned.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int  len = 0;
+
+	switch (phba->link_state) {
+	case LPFC_LINK_UNKNOWN:
+	case LPFC_WARM_START:
+	case LPFC_INIT_START:
+	case LPFC_INIT_MBX_CMDS:
+	case LPFC_LINK_DOWN:
+	case LPFC_HBA_ERROR:
+		if (phba->hba_flag & LINK_DISABLED)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+				"Link Down - User disabled\n");
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+				"Link Down\n");
+		break;
+	case LPFC_LINK_UP:
+	case LPFC_CLEAR_LA:
+	case LPFC_HBA_READY:
+		len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+
+		switch (vport->port_state) {
+		case LPFC_LOCAL_CFG_LINK:
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"Configuring Link\n");
+			break;
+		case LPFC_FDISC:
+		case LPFC_FLOGI:
+		case LPFC_FABRIC_CFG_LINK:
+		case LPFC_NS_REG:
+		case LPFC_NS_QRY:
+		case LPFC_BUILD_DISC_LIST:
+		case LPFC_DISC_AUTH:
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"Discovery\n");
+			break;
+		case LPFC_VPORT_READY:
+			len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+			break;
+
+		case LPFC_VPORT_FAILED:
+			len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+			break;
+
+		case LPFC_VPORT_UNKNOWN:
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"Unknown\n");
+			break;
+		}
+		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"   Menlo Maint Mode\n");
+		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			if (vport->fc_flag & FC_PUBLIC_LOOP)
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Public Loop\n");
+			else
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Private Loop\n");
+		} else {
+			if (vport->fc_flag & FC_FABRIC)
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Fabric\n");
+			else
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Point-2-Point\n");
+		}
+	}
+
+	return len;
+}
+
+/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the protocol type, "fc", "fcoe" or "unknown".
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return snprintf(buf, PAGE_SIZE, "fc\n");
+
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+			return snprintf(buf, PAGE_SIZE, "fcoe\n");
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+			return snprintf(buf, PAGE_SIZE, "fc\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
+ * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
+ *			    (OAS) is supported.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains "1" if OAS is supported, "0" otherwise.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			phba->sli4_hba.pc_sli4_params.oas_supported);
+}
+
+/**
+ * lpfc_link_state_store - Transition the link_state on an HBA port
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "up" or "down".
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if the buffer is not "up" or "down"
+ * return from link state change function if non-zero
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	int status = -EINVAL;
+
+	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
+			(phba->link_state == LPFC_LINK_DOWN))
+		status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
+			(phba->link_state >= LPFC_LINK_UP))
+		status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
+
+	if (status == 0)
+		return strlen(buf);
+	else
+		return status;
+}
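+
+/*
+ * Example user-space use of the link_state attribute (an illustrative
+ * sketch, not part of the driver; the host number is an assumption):
+ *
+ *	int fd = open("/sys/class/scsi_host/host7/link_state", O_WRONLY);
+ *
+ *	if (fd >= 0) {
+ *		write(fd, "down", strlen("down"));	(bring link down)
+ *		close(fd);
+ *	}
+ */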
+
+/**
+ * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the sum of fc mapped and unmapped.
+ *
+ * Description:
+ * Returns the ascii text number of the sum of the vport's fc mapped and
+ * unmapped node counts.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_num_discovered_ports_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			vport->fc_map_cnt + vport->fc_unmap_cnt);
+}
+
+/**
+ * lpfc_issue_lip - Misnomer, name carried over from long ago
+ * @shost: Scsi_Host pointer.
+ *
+ * Description:
+ * Bring the link down gracefully then re-init the link. The firmware will
+ * re-init the Fibre Channel interface as required. Does not issue a LIP.
+ *
+ * Returns:
+ * -EPERM port offline or management commands are being blocked
+ * -ENOMEM cannot allocate memory for the mailbox command
+ * -EIO error sending the mailbox command
+ * zero for success
+ **/
+static int
+lpfc_issue_lip(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	LPFC_MBOXQ_t *pmboxq;
+	int mbxstatus = MBXERR_ERROR;
+
+	/*
+	 * If the link is offline, disabled or BLOCK_MGMT_IO
+	 * it doesn't make any sense to allow issue_lip
+	 */
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (phba->hba_flag & LINK_DISABLED) ||
+	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
+		return -EPERM;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmboxq)
+		return -ENOMEM;
+
+	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
+
+	if ((mbxstatus == MBX_SUCCESS) &&
+	    (pmboxq->u.mb.mbxStatus == 0 ||
+	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
+		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
+			       phba->cfg_link_speed);
+		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+						     phba->fc_ratov * 2);
+		if ((mbxstatus == MBX_SUCCESS) &&
+		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"2859 SLI authentication is required "
+					"for INIT_LINK but has not done yet\n");
+	}
+
+	lpfc_set_loopback_flag(phba);
+	if (mbxstatus != MBX_TIMEOUT)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	if (mbxstatus == MBXERR_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+int
+lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
+{
+	int cnt = 0;
+
+	spin_lock_irq(lock);
+	while (!list_empty(q)) {
+		spin_unlock_irq(lock);
+		msleep(20);
+		if (cnt++ > 250) {  /* 5 secs */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0466 Outstanding IO when "
+					"bringing Adapter offline\n");
+			return 0;
+		}
+		spin_lock_irq(lock);
+	}
+	spin_unlock_irq(lock);
+	return 1;
+}
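+
+/*
+ * Typical use of lpfc_emptyq_wait() is to drain a ring's txcmplq before
+ * an offline transition, as lpfc_do_offline() does below:
+ *
+ *	if (!lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock))
+ *		goto out;	(warning already logged; offline anyway)
+ */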
+
+/**
+ * lpfc_do_offline - Issues a mailbox command to bring the link down
+ * @phba: lpfc_hba pointer.
+ * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
+ *
+ * Notes:
+ * Can wait up to 5 seconds for the port ring buffers count
+ * to reach zero, prints a warning if it is not zero and continues.
+ * lpfc_workq_post_event() returns zero if it fails to post the event.
+ *
+ * Returns:
+ * -ENOMEM error posting the event
+ * -EIO the posted event reported an error
+ * zero for success
+ **/
+static int
+lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
+{
+	struct completion online_compl;
+	struct lpfc_queue *qp = NULL;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_sli *psli;
+	int status = 0;
+	int i;
+	int rc;
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+			      LPFC_EVT_OFFLINE_PREP);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	psli = &phba->sli;
+
+	/* Wait a little for things to settle down, but not
+	 * long enough for dev loss timeout to expire.
+	 */
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		for (i = 0; i < psli->num_rings; i++) {
+			pring = &psli->sli3_ring[i];
+			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+					      &phba->hbalock))
+				goto out;
+		}
+	} else {
+		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+			pring = qp->pring;
+			if (!pring)
+				continue;
+			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+					      &pring->ring_lock))
+				goto out;
+		}
+	}
+out:
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * lpfc_selective_reset - Offline then onlines the port
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * If the port is configured to allow a reset then the hba is brought
+ * offline then online.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
+ *
+ * Returns:
+ * lpfc_do_offline() return code if not zero
+ * -EACCES if reset is not enabled
+ * -ENOMEM error posting the online event
+ * -EIO the online event reported an error
+ * zero for success
+ **/
+int
+lpfc_selective_reset(struct lpfc_hba *phba)
+{
+	struct completion online_compl;
+	int status = 0;
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
+	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+		if (status != 0)
+			return status;
+	}
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+			      LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * lpfc_issue_reset - Selectively resets an adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the buf contains the string "selective" then lpfc_selective_reset()
+ * is called to perform the reset.
+ *
+ * Notes:
+ * Assumes any error from lpfc_selective_reset() will be negative.
+ * If lpfc_selective_reset() returns zero then the length of the buffer
+ * is returned which indicates success
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain the string "selective"
+ * length of buf if the call to lpfc_selective_reset() succeeds
+ * return value of lpfc_selective_reset() if the call fails
+ **/
+static ssize_t
+lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int status = -EINVAL;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
+	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
+		status = phba->lpfc_selective_reset(phba);
+
+	if (status == 0)
+		return strlen(buf);
+	else
+		return status;
+}
+
+/**
+ * lpfc_sli4_pdev_status_reg_wait - Wait on the pdev status register for readiness
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Waits on the sliport status register of an SLI4 interface type-2 device
+ * for readiness after performing a firmware reset.
+ *
+ * Returns:
+ * zero for success, -EPERM when the port does not have privilege to perform
+ * the reset, -EIO when the port times out recovering from the reset.
+ *
+ * Note:
+ * Since the caller interprets the return code by value, be careful when
+ * making changes or additions to the return codes.
+ **/
+int
+lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
+{
+	struct lpfc_register portstat_reg = {0};
+	int i;
+
+	msleep(100);
+	lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+		   &portstat_reg.word0);
+
+	/* verify if privileged for the requested operation */
+	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
+	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
+		return -EPERM;
+
+	/* wait for the SLI port firmware ready after firmware reset */
+	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
+		msleep(10);
+		lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+			   &portstat_reg.word0);
+		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
+			continue;
+		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
+			continue;
+		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
+			continue;
+		break;
+	}
+
+	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
+		return 0;
+	else
+		return -EIO;
+}
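+
+/*
+ * Since callers dispatch on the exact return value, a typical caller
+ * follows the pattern of lpfc_sli4_pdev_reg_request() below:
+ *
+ *	rc = lpfc_sli4_pdev_status_reg_wait(phba);
+ *	if (rc == -EPERM)
+ *		(no reset privilege: log it and carry on)
+ *	else if (rc == -EIO)
+ *		(port never came ready: give up)
+ *	else
+ *		(port is ready: bring it back online)
+ */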
+
+/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ * @opcode: LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET.
+ *
+ * Description:
+ * Request an SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success, negative error code on failure
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+	struct completion online_compl;
+	struct pci_dev *pdev = phba->pcidev;
+	uint32_t before_fc_flag;
+	uint32_t sriov_nr_virtfn;
+	uint32_t reg_val;
+	int status = 0, rc = 0;
+	int job_posted = 1, sriov_err;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+	     LPFC_SLI_INTF_IF_TYPE_2))
+		return -EPERM;
+
+	/* Keep state if we need to restore back */
+	before_fc_flag = phba->pport->fc_flag;
+	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
+	/* Disable SR-IOV virtual functions if enabled */
+	if (phba->cfg_sriov_nr_virtfn) {
+		pci_disable_sriov(pdev);
+		phba->cfg_sriov_nr_virtfn = 0;
+	}
+
+	if (opcode == LPFC_FW_DUMP)
+		phba->hba_flag |= HBA_FW_DUMP_OP;
+
+	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+	if (status != 0) {
+		phba->hba_flag &= ~HBA_FW_DUMP_OP;
+		return status;
+	}
+
+	/* wait for the device to be quiesced before firmware reset */
+	msleep(100);
+
+	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+			LPFC_CTL_PDEV_CTL_OFFSET);
+
+	if (opcode == LPFC_FW_DUMP)
+		reg_val |= LPFC_FW_DUMP_REQUEST;
+	else if (opcode == LPFC_FW_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+	else if (opcode == LPFC_DV_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+	       LPFC_CTL_PDEV_CTL_OFFSET);
+	/* flush */
+	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+	/* delay driver action following IF_TYPE_2 reset */
+	rc = lpfc_sli4_pdev_status_reg_wait(phba);
+
+	if (rc == -EPERM) {
+		/* no privilege for reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3150 No privilege to perform the requested "
+				"access: x%x\n", reg_val);
+	} else if (rc == -EIO) {
+		/* reset failed, there is nothing more we can do */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3153 Fail to perform the requested "
+				"access: x%x\n", reg_val);
+		return rc;
+	}
+
+	/* keep the original port state */
+	if (before_fc_flag & FC_OFFLINE_MODE)
+		goto out;
+
+	init_completion(&online_compl);
+	job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+					   LPFC_EVT_ONLINE);
+	if (!job_posted)
+		goto out;
+
+	wait_for_completion(&online_compl);
+
+out:
+	/* in any case, restore the virtual functions enabled as before */
+	if (sriov_nr_virtfn) {
+		sriov_err =
+			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+		if (!sriov_err)
+			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+	}
+
+	/* return proper error code */
+	if (!rc) {
+		if (!job_posted)
+			rc = -ENOMEM;
+		else if (status)
+			rc = -EIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_nport_evt_cnt_show - Return the number of nport events
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ascii number of nport events.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+/**
+ * lpfc_board_mode_show - Return the state of the board
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the state of the adapter.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	char *state;
+
+	if (phba->link_state == LPFC_HBA_ERROR)
+		state = "error";
+	else if (phba->link_state == LPFC_WARM_START)
+		state = "warm start";
+	else if (phba->link_state == LPFC_INIT_START)
+		state = "offline";
+	else
+		state = "online";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+/**
+ * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing one of the strings "online", "offline", "warm" or "error".
+ * @count: unused variable.
+ *
+ * Returns:
+ * -EACCES if enable hba reset not enabled
+ * -EINVAL if the buffer does not contain a valid string (see above)
+ * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
+ * buf length greater than zero indicates success
+ **/
+static ssize_t
+lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct completion online_compl;
+	char *board_mode_str = NULL;
+	int status = 0;
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset) {
+		status = -EACCES;
+		goto board_mode_out;
+	}
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "3050 lpfc_board_mode set to %s\n", buf);
+
+	init_completion(&online_compl);
+
+	if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
+		rc = lpfc_workq_post_event(phba, &status, &online_compl,
+				      LPFC_EVT_ONLINE);
+		if (rc == 0) {
+			status = -ENOMEM;
+			goto board_mode_out;
+		}
+		wait_for_completion(&online_compl);
+		if (status)
+			status = -EIO;
+	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
+		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			status = -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
+	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			status = -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
+	else
+		status = -EINVAL;
+
+board_mode_out:
+	if (!status)
+		return strlen(buf);
+
+	board_mode_str = strchr(buf, '\n');
+	if (board_mode_str)
+		*board_mode_str = '\0';
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "3097 Failed \"%s\", status(%d), "
+			 "fc_flag(x%x)\n",
+			 buf, status, phba->pport->fc_flag);
+	return status;
+}
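+
+/*
+ * Example user-space use of the board_mode attribute (an illustrative
+ * sketch; the host number is an assumption): cycle the adapter offline
+ * and back online:
+ *
+ *	int fd = open("/sys/class/scsi_host/host7/board_mode", O_WRONLY);
+ *
+ *	write(fd, "offline", strlen("offline"));
+ *	write(fd, "online", strlen("online"));
+ *	close(fd);
+ */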
+
+/**
+ * lpfc_get_hba_info - Return various bits of information about the adapter
+ * @phba: pointer to the adapter structure.
+ * @mxri: max xri count.
+ * @axri: available xri count.
+ * @mrpi: max rpi count.
+ * @arpi: available rpi count.
+ * @mvpi: max vpi count.
+ * @avpi: available vpi count.
+ *
+ * Description:
+ * If an integer pointer for a count is not null then the value for that
+ * count is returned.
+ *
+ * Returns:
+ * zero on error
+ * one for success
+ **/
+static int
+lpfc_get_hba_info(struct lpfc_hba *phba,
+		  uint32_t *mxri, uint32_t *axri,
+		  uint32_t *mrpi, uint32_t *arpi,
+		  uint32_t *mvpi, uint32_t *avpi)
+{
+	struct lpfc_mbx_read_config *rd_config;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	int rc = 0;
+	uint32_t max_vpi;
+
+	/*
+	 * prevent udev from issuing mailbox commands until the port is
+	 * configured.
+	 */
+	if (phba->link_state < LPFC_LINK_DOWN ||
+	    !phba->mbox_mem_pool ||
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
+		return 0;
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+		return 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return 0;
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	pmb->mbxCommand = MBX_READ_CONFIG;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+
+	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+		rc = MBX_NOT_FINISHED;
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return 0;
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rd_config = &pmboxq->u.mqe.un.rd_config;
+		if (mrpi)
+			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		if (arpi)
+			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.rpi_used;
+		if (mxri)
+			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		if (axri)
+			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.xri_used;
+
+		/* Account for differences with SLI-3.  Get vpi count from
+		 * mailbox data and subtract one for max vpi value.
+		 */
+		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
+			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+
+		/* Limit the max we support */
+		if (max_vpi > LPFC_MAX_VPI)
+			max_vpi = LPFC_MAX_VPI;
+		if (mvpi)
+			*mvpi = max_vpi;
+		if (avpi)
+			*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
+	} else {
+		if (mrpi)
+			*mrpi = pmb->un.varRdConfig.max_rpi;
+		if (arpi)
+			*arpi = pmb->un.varRdConfig.avail_rpi;
+		if (mxri)
+			*mxri = pmb->un.varRdConfig.max_xri;
+		if (axri)
+			*axri = pmb->un.varRdConfig.avail_xri;
+		if (mvpi)
+			*mvpi = pmb->un.varRdConfig.max_vpi;
+		if (avpi) {
+			/* avail_vpi is only valid if link is up and ready */
+			if (phba->link_state == LPFC_HBA_READY)
+				*avpi = pmb->un.varRdConfig.avail_vpi;
+			else
+				*avpi = pmb->un.varRdConfig.max_vpi;
+		}
+	}
+
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return 1;
+}
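+
+/*
+ * Callers pass NULL for any count they do not need, e.g. fetching only
+ * the max rpi count as lpfc_max_rpi_show() does below:
+ *
+ *	uint32_t mrpi;
+ *
+ *	if (lpfc_get_hba_info(phba, NULL, NULL, &mrpi, NULL, NULL, NULL))
+ *		(mrpi now holds the maximum rpi count)
+ */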
+
+/**
+ * lpfc_max_rpi_show - Return maximum rpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_rpi_show - Return maximum rpi minus available rpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt, acnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_max_xri_show - Return maximum xri
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt;
+
+	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_xri_show - Return maximum xri minus the available xri
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt, acnt;
+
+	if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_max_vpi_show - Return maximum vpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt, acnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_npiv_info_show - Return text about NPIV support for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: text that must be interpreted to determine if npiv is supported.
+ *
+ * Description:
+ * Buffer will contain text indicating npiv is not supported on the port,
+ * the port is an NPIV physical port, or it is an npiv virtual port with
+ * the id of the vport.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (!(phba->max_vpi))
+		return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+	if (vport->port_type == LPFC_PHYSICAL_PORT)
+		return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+	return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+}
+
+/**
+ * lpfc_poll_show - Return text about poll support for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the cfg_poll in hex.
+ *
+ * Notes:
+ * cfg_poll should be a lpfc_polling_flags type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_poll_show(struct device *dev, struct device_attribute *attr,
+	       char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+}
+
+/**
+ * lpfc_poll_store - Set the value of cfg_poll for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Notes:
+ * buf contents converted to integer and checked for a valid value.
+ *
+ * Returns:
+ * -EINVAL if the buffer cannot be converted or is out of range
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_poll_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t creg_val;
+	uint32_t old_val;
+	int val = 0;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if ((val & 0x3) != val)
+		return -EINVAL;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		val = 0;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+		"3051 lpfc_poll changed from %d to %d\n",
+		phba->cfg_poll, val);
+
+	spin_lock_irq(&phba->hbalock);
+
+	old_val = phba->cfg_poll;
+
+	if (val & ENABLE_FCP_RING_POLLING) {
+		if ((val & DISABLE_FCP_RING_INT) &&
+		    !(old_val & DISABLE_FCP_RING_INT)) {
+			if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+				spin_unlock_irq(&phba->hbalock);
+				return -EINVAL;
+			}
+			creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+			writel(creg_val, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+
+			lpfc_poll_start_timer(phba);
+		}
+	} else if (val != 0x0) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EINVAL;
+	}
+
+	if (!(val & DISABLE_FCP_RING_INT) &&
+	    (old_val & DISABLE_FCP_RING_INT))
+	{
+		spin_unlock_irq(&phba->hbalock);
+		del_timer(&phba->fcp_poll_timer);
+		spin_lock_irq(&phba->hbalock);
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			spin_unlock_irq(&phba->hbalock);
+			return -EINVAL;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	phba->cfg_poll = val;
+
+	spin_unlock_irq(&phba->hbalock);
+
+	return strlen(buf);
+}
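+
+/*
+ * The accepted values are built from the lpfc_polling_flags bits
+ * (assuming ENABLE_FCP_RING_POLLING is 0x1 and DISABLE_FCP_RING_INT is
+ * 0x2, as the (val & 0x3) check above implies): 0x0 disables polling,
+ * 0x1 polls with the FCP ring interrupt still enabled, and 0x3 polls
+ * with the ring interrupt masked. Example user-space use (illustrative;
+ * host number and attribute name are assumptions):
+ *
+ *	int fd = open("/sys/class/scsi_host/host7/lpfc_poll", O_WRONLY);
+ *
+ *	write(fd, "0x3", strlen("0x3"));	(poll, interrupt masked)
+ */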
+
+/**
+ * lpfc_fips_level_show - Return the current FIPS level for the HBA
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the FIPS level in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_level_show(struct device *dev,  struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+}
+
+/**
+ * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the FIPS Spec revision in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_rev_show(struct device *dev,  struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+}
+
+/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+	      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+			(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+			(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+				"" : "Not ");
+}
+
+/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	uint16_t max_nr_virtfn;
+
+	max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+	return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+}
+
+static inline bool lpfc_rangecheck(uint val, uint min, uint max)
+{
+	return val >= min && val <= max;
+}
+
+/**
+ * lpfc_enable_bbcr_set - Set an attribute value
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the adapter config field
+ * if in the valid range. Prints an error message and does not set the
+ * parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ */
+static ssize_t
+lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
+{
+	if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3068 %s_enable_bbcr changed from %d to %d\n",
+				LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
+		phba->cfg_enable_bbcr = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
+			LPFC_DRIVER_NAME, val);
+	return -EINVAL;
+}
+
+/**
+ * lpfc_param_show - Return a cfg attribute value in decimal
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show.
+ *
+ * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_param_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	struct lpfc_hba   *phba = vport->phba;\
+	return snprintf(buf, PAGE_SIZE, "%d\n",\
+			phba->cfg_##attr);\
+}
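+
+/*
+ * For example (an illustrative expansion; "foo" is a hypothetical
+ * attribute), lpfc_param_show(foo) defines:
+ *
+ *	static ssize_t
+ *	lpfc_foo_show(struct device *dev, struct device_attribute *attr,
+ *		      char *buf)
+ *	{
+ *		...
+ *		return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_foo);
+ *	}
+ */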
+
+/**
+ * lpfc_param_hex_show - Return a cfg attribute value in hex
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_param_hex_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	struct lpfc_hba   *phba = vport->phba;\
+	return snprintf(buf, PAGE_SIZE, "%#x\n",\
+			phba->cfg_##attr);\
+}
+
+/**
+ * lpfc_param_init - Initializes a cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: Initializes an attribute.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Validates the min and max values then sets the adapter config field
+ * accordingly, or uses the default if out of range and prints an error message.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
+#define lpfc_param_init(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
+{ \
+	if (lpfc_rangecheck(val, minval, maxval)) {\
+		phba->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"0449 lpfc_"#attr" attribute cannot be set to %d, "\
+			"allowed range is ["#minval", "#maxval"]\n", val); \
+	phba->cfg_##attr = default;\
+	return -EINVAL;\
+}
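+
+/*
+ * For example (hypothetical attribute and limits), lpfc_param_init(foo,
+ * 5, 1, 10) defines lpfc_foo_init(): a val of 1..10 is stored in
+ * phba->cfg_foo and zero is returned; anything else logs message 0449,
+ * stores the default 5 and returns -EINVAL.
+ */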
+
+/**
+ * lpfc_param_set - Set a cfg attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the adapter config field
+ * if in the valid range. Prints an error message and does not set the
+ * parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
+#define lpfc_param_set(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
+{ \
+	if (lpfc_rangecheck(val, minval, maxval)) {\
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"3052 lpfc_" #attr " changed from %d to %d\n", \
+			phba->cfg_##attr, val); \
+		phba->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"0450 lpfc_"#attr" attribute cannot be set to %d, "\
+			"allowed range is ["#minval", "#maxval"]\n", val); \
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_param_store - Set a vport attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_store.
+ *
+ * lpfc_##attr##_store: Set an attribute value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in ascii.
+ * @count: not used.
+ *
+ * Description:
+ * Convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
+#define lpfc_param_store(attr)	\
+static ssize_t \
+lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
+		    const char *buf, size_t count) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	struct lpfc_hba   *phba = vport->phba;\
+	uint val = 0;\
+	if (!isdigit(buf[0]))\
+		return -EINVAL;\
+	if (sscanf(buf, "%i", &val) != 1)\
+		return -EINVAL;\
+	if (lpfc_##attr##_set(phba, val) == 0) \
+		return strlen(buf);\
+	else \
+		return -EINVAL;\
+}
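+
+/*
+ * A read/write hba attribute is typically assembled from the macros
+ * above plus DEVICE_ATTR(), e.g. for a hypothetical "foo" parameter
+ * with default 5 and range 1-10:
+ *
+ *	lpfc_param_init(foo, 5, 1, 10)
+ *	lpfc_param_show(foo)
+ *	lpfc_param_set(foo, 5, 1, 10)
+ *	lpfc_param_store(foo)
+ *	static DEVICE_ATTR(lpfc_foo, S_IRUGO | S_IWUSR,
+ *			   lpfc_foo_show, lpfc_foo_store);
+ */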
+
+/**
+ * lpfc_vport_param_show - Return decimal formatted cfg attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in decimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: length of formatted string.
+ **/
+#define lpfc_vport_param_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+}
+
+/**
+ * lpfc_vport_param_hex_show - Return hex formatted attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g.
+ * hba_queue_depth expands into a function with the name
+ * lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in hexadecimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: length of formatted string.
+ **/
+#define lpfc_vport_param_hex_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+}
+
+/**
+ * lpfc_vport_param_init - Initialize a vport cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: validates the min and max values then sets the
+ * adapter config field accordingly, or uses the default if out of range
+ * and prints an error message.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
+#define lpfc_vport_param_init(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
+{ \
+	if (lpfc_rangecheck(val, minval, maxval)) {\
+		vport->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+			 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
+			 "allowed range is ["#minval", "#maxval"]\n", val); \
+	vport->cfg_##attr = default;\
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_set - Set a vport cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: validates the min and max values then sets the
+ * adapter config field if in the valid range. prints error message
+ * and does not set the parameter if invalid.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
+#define lpfc_vport_param_set(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
+{ \
+	if (lpfc_rangecheck(val, minval, maxval)) {\
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+			"3053 lpfc_" #attr \
+			" changed from %d (x%x) to %d (x%x)\n", \
+			vport->cfg_##attr, vport->cfg_##attr, \
+			val, val); \
+		vport->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+			 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
+			 "allowed range is ["#minval", "#maxval"]\n", val); \
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_store - Set a vport attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth
+ * expands into a function with the name lpfc_hba_queue_depth_store
+ *
+ * lpfc_##attr##_store: converts the ascii text number to an integer, then
+ * uses the lpfc_##attr##_set function to set the value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in decimal.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
+#define lpfc_vport_param_store(attr)	\
+static ssize_t \
+lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
+		    const char *buf, size_t count) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	uint val = 0;\
+	if (!isdigit(buf[0]))\
+		return -EINVAL;\
+	if (sscanf(buf, "%i", &val) != 1)\
+		return -EINVAL;\
+	if (lpfc_##attr##_set(vport, val) == 0) \
+		return strlen(buf);\
+	else \
+		return -EINVAL;\
+}
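+
+/*
+ * Editor's sketch of how the vport parameter macros combine (hypothetical
+ * attribute "foo"; the real attributes below are generated the same way,
+ * typically via the LPFC_VPORT_ATTR_* wrapper macros):
+ *
+ *   lpfc_vport_param_init(foo, 1, 0, 1)    generates lpfc_foo_init()
+ *   lpfc_vport_param_set(foo, 1, 0, 1)     generates lpfc_foo_set()
+ *   lpfc_vport_param_show(foo)             generates lpfc_foo_show()
+ *   lpfc_vport_param_store(foo)            generates lpfc_foo_store()
+ *   static DEVICE_ATTR(lpfc_foo, S_IRUGO | S_IWUSR,
+ *                      lpfc_foo_show, lpfc_foo_store);
+ */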
+
+
+static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
+static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
+static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
+static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
+static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
+static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
+static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
+static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
+static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
+static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
+static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
+static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
+static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
+		lpfc_link_state_store);
+static DEVICE_ATTR(option_rom_version, S_IRUGO,
+		   lpfc_option_rom_version_show, NULL);
+static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
+		   lpfc_num_discovered_ports_show, NULL);
+static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
+static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
+static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
+static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
+static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
+		   lpfc_board_mode_show, lpfc_board_mode_store);
+static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
+static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
+static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
+static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
+static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
+static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
+static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
+static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
+static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
+static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
+static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+		   lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
+		   NULL);
+
+static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+#define WWN_SZ 8
+/**
+ * lpfc_wwn_set - Convert string to the 8 byte WWN value.
+ * @buf: WWN string.
+ * @cnt: Length of string.
+ * @wwn: Array to receive converted wwn value.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static ssize_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+	unsigned int i, j;
+
+	/* Count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	memset(wwn, 0, WWN_SZ);
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		if ((*buf >= 'a') && (*buf <= 'f'))
+			j = ((j << 4) | ((*buf++ - 'a') + 10));
+		else if ((*buf >= 'A') && (*buf <= 'F'))
+			j = ((j << 4) | ((*buf++ - 'A') + 10));
+		else if ((*buf >= '0') && (*buf <= '9'))
+			j = ((j << 4) | (*buf++ - '0'));
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	return 0;
+}
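+
+/*
+ * Editor's example: lpfc_wwn_set("10000000c9abcdef", 16, wwn) accepts the
+ * bare 16-digit form and fills wwn[] with
+ * {0x10, 0x00, 0x00, 0x00, 0xc9, 0xab, 0xcd, 0xef}; the "x"- and
+ * "0x"-prefixed forms (cnt of 17 and 18) are stripped by the checks above.
+ */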
+/**
+ * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string lpfc_soft_wwn_key.
+ * @count: must be size of lpfc_soft_wwn_key.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
+ * length of buf indicates success
+ **/
+static ssize_t
+lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned int cnt = count;
+	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
+	u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
+
+	/*
+	 * We're doing a simple sanity check for soft_wwpn setting.
+	 * We require that the user write a specific key to enable
+	 * the soft_wwpn attribute to be settable. Once the attribute
+	 * is written, the enable key resets. If further updates are
+	 * desired, the key must be written again to re-enable the
+	 * attribute.
+	 *
+	 * The "key" is not secret - it is a hardcoded string shown
+	 * here. The intent is to protect against the random user or
+	 * application that is just writing attributes.
+	 */
+	if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				 "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
+				 " be enabled: fawwpn is enabled\n");
+		return -EINVAL;
+	}
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt != strlen(lpfc_soft_wwn_key)) ||
+	    (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
+		return -EINVAL;
+
+	phba->soft_wwn_enable = 1;
+
+	dev_printk(KERN_WARNING, &phba->pcidev->dev,
+		   "lpfc%d: soft_wwpn assignment has been enabled.\n",
+		   phba->brd_no);
+	dev_printk(KERN_WARNING, &phba->pcidev->dev,
+		   "  The soft_wwpn feature is not supported by Broadcom.");
+
+	return count;
+}
+static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
+		   lpfc_soft_wwn_enable_store);
+
+/**
+ * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwpn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)phba->cfg_soft_wwpn);
+}
+
+/**
+ * lpfc_soft_wwpn_store - Set the ww port name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the wwpn in hexadecimal.
+ * @count: number of wwpn bytes in buf
+ *
+ * Returns:
+ * -EACCES hba reset not enabled, adapter over temp
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
+ * -EIO error taking adapter offline or online
+ * value of count on success
+ **/
+static ssize_t
+lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct completion online_compl;
+	int stat1 = 0, stat2 = 0;
+	unsigned int cnt = count;
+	u8 wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+	spin_lock_irq(&phba->hbalock);
+	if (phba->over_temp_state == HBA_OVER_TEMP) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EACCES;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if (!phba->soft_wwn_enable)
+		return -EINVAL;
+
+	/* lock setting wwpn, wwnn down */
+	phba->soft_wwn_enable = 0;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc) {
+		/* not able to set wwpn, unlock it */
+		phba->soft_wwn_enable = 1;
+		return rc;
+	}
+
+	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
+	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
+	if (phba->cfg_soft_wwnn)
+		fc_host_node_name(shost) = phba->cfg_soft_wwnn;
+
+	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+		   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
+
+	stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+	if (stat1)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0463 lpfc_soft_wwpn attribute set failed to "
+				"reinit adapter - %d\n", stat1);
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+				   LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+	if (stat2)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0464 lpfc_soft_wwpn attribute set failed to "
+				"reinit adapter - %d\n", stat2);
+	return (stat1 || stat2) ? -EIO : count;
+}
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
+		   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
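+
+/*
+ * Editor's usage sketch (hostN and the WWPN are placeholders):
+ *
+ *   # echo "C99G71SL8032A" > /sys/class/scsi_host/hostN/lpfc_soft_wwn_enable
+ *   # echo "0x10000000c9abcdef" > /sys/class/scsi_host/hostN/lpfc_soft_wwpn
+ *
+ * Writing the key arms soft_wwn_enable for one update; the wwpn write
+ * consumes it, takes the port offline, applies the new name and brings
+ * the port back online.
+ */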
+
+/**
+ * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwnn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)phba->cfg_soft_wwnn);
+}
+
+/**
+ * lpfc_soft_wwnn_store - sets the ww node name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the ww node name in hexadecimal.
+ * @count: number of wwnn bytes in buf.
+ *
+ * Returns:
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
+ * value of count on success
+ **/
+static ssize_t
+lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	u8 wwnn[WWN_SZ];
+	int rc;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if (!phba->soft_wwn_enable)
+		return -EINVAL;
+
+	rc = lpfc_wwn_set(buf, cnt, wwnn);
+	if (rc) {
+		/* Allow wwnn to be set many times, as long as the enable
+		 * is set. However, once the wwpn is set, everything locks.
+		 */
+		return rc;
+	}
+
+	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
+
+	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+		   "lpfc%d: soft_wwnn set. Value will take effect upon "
+		   "setting of the soft_wwpn\n", phba->brd_no);
+
+	return count;
+}
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
+		   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or a wwpn byte is invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_priority_show - Return the priority used for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
+}
+
+/**
+ * lpfc_oas_priority_store - Store the priority used for
+ *		      Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not hold a valid priority (0 to 0x7f)
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	unsigned long val;
+	int ret;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret || (val > 0x7f))
+		return -EINVAL;
+
+	if (val)
+		phba->cfg_oas_priority = (uint8_t)val;
+	else
+		phba->cfg_oas_priority = phba->cfg_XLanePriority;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
+		   lpfc_oas_priority_show, lpfc_oas_priority_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ *		      for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
+}
+
+/**
+ * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
+ *		      for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or a wwpn byte is invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	if (phba->cfg_oas_priority == 0)
+		phba->cfg_oas_priority = phba->cfg_XLanePriority;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_vpt_show, lpfc_oas_vpt_store);
+
+/**
+ * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
+ *			    of whether luns will be enabled or disabled
+ *			    for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
+}
+
+/**
+ * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
+ *			    of whether luns will be enabled or disabled
+ *			    for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain 0 or 1
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int val = 0;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if ((val != 0) && (val != 1))
+		return -EINVAL;
+
+	phba->cfg_oas_lun_state = val;
+	return strlen(buf);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
+		   lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
+
+/**
+ * lpfc_oas_lun_status_show - Return the status of the Optimized Access
+ *                          Storage (OAS) lun returned by the
+ *                          lpfc_oas_lun_show function.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of formatted string.
+ **/
+static ssize_t
+lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
+		return -EFAULT;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
+}
+static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
+		   lpfc_oas_lun_status_show, NULL);
+
+
+/**
+ * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
+ *			   (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport on which the lun resides.
+ * @tgt_wwpn: wwpn of the fcp target.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the lun.
+ * @pri: the priority to assign to the lun.
+ *
+ * Returns:
+ * SUCCESS: 0
+ * -ENOMEM failed to enable the lun for OAS operations.
+ * -EPERM OAS is not enabled or not supported by this port.
+ *
+ */
+static ssize_t
+lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+		       uint8_t tgt_wwpn[], uint64_t lun,
+		       uint32_t oas_state, uint8_t pri)
+{
+	int rc = 0;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	if (oas_state) {
+		if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+					 (struct lpfc_name *)tgt_wwpn,
+					 lun, pri))
+			rc = -ENOMEM;
+	} else {
+		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
+				     (struct lpfc_name *)tgt_wwpn, lun, pri);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
+ *			  Access Storage (OAS) operations.
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: wwpn of the vport associated with the returned lun
+ * @tgt_wwpn: wwpn of the target associated with the returned lun
+ * @lun_status: status of the returned lun
+ * @lun_pri: priority of the returned lun
+ *
+ * Returns the first or next lun enabled for OAS operations for the vport/target
+ * specified.  If a lun is found, its vport wwpn, target wwpn and status are
+ * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
+ *
+ * Return:
+ * lun that is OAS enabled for the vport/target
+ * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
+ */
+static uint64_t
+lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+		      uint8_t tgt_wwpn[], uint32_t *lun_status,
+		      uint32_t *lun_pri)
+{
+	uint64_t found_lun;
+
+	if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
+		return NOT_OAS_ENABLED_LUN;
+	if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
+				   phba->sli4_hba.oas_next_vpt_wwpn,
+				   (struct lpfc_name *)
+				   phba->sli4_hba.oas_next_tgt_wwpn,
+				   &phba->sli4_hba.oas_next_lun,
+				   (struct lpfc_name *)vpt_wwpn,
+				   (struct lpfc_name *)tgt_wwpn,
+				   &found_lun, lun_status, lun_pri))
+		return found_lun;
+	else
+		return NOT_OAS_ENABLED_LUN;
+}
+
+/**
+ * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
+ * @phba: lpfc_hba pointer.
+ * @vpt_wwpn: vport wwpn by reference.
+ * @tgt_wwpn: target wwpn by reference.
+ * @lun: the fc lun for setting oas state.
+ * @oas_state: the oas state to be set to the oas_lun.
+ * @pri: the priority to assign to the lun.
+ *
+ * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
+ * a lun for OAS operations.
+ *
+ * Return:
+ * SUCCESS: 0
+ * -ENOMEM: failed to enable a lun for OAS operations
+ * -EPERM: OAS is not enabled
+ */
+static ssize_t
+lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
+			  uint8_t tgt_wwpn[], uint64_t lun,
+			  uint32_t oas_state, uint8_t pri)
+{
+	int rc;
+
+	rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
+				    oas_state, pri);
+	return rc;
+}
+
+/**
+ * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * This routine returns a lun enabled for OAS each time the function
+ * is called.
+ *
+ * Returns:
+ * SUCCESS: size of formatted string.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ **/
+static ssize_t
+lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	uint64_t oas_lun;
+	int len = 0;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
+			return -EFAULT;
+
+	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
+			return -EFAULT;
+
+	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
+					phba->cfg_oas_tgt_wwpn,
+					&phba->cfg_oas_lun_status,
+					&phba->cfg_oas_priority);
+	if (oas_lun != NOT_OAS_ENABLED_LUN)
+		phba->cfg_oas_flags |= OAS_LUN_VALID;
+
+	len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
+
+	return len;
+}
+
+/**
+ * lpfc_oas_lun_store - Sets the OAS state for lun
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: size of the data buffer.
+ *
+ * This function sets the OAS state for lun.  Before this function is called,
+ * the vport wwpn, target wwpn, and oas state need to be set.
+ *
+ * Returns:
+ * SUCCESS: value of count.
+ * -EFAULT: target or vport wwpn was not set properly.
+ * -EPERM: oas is not enabled.
+ * -EINVAL: the buffer does not contain a valid lun.
+ **/
+static ssize_t
+lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint64_t scsi_lun;
+	uint32_t pri;
+	ssize_t rc;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
+		return -EFAULT;
+
+	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
+		return -EFAULT;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
+		return -EINVAL;
+
+	pri = phba->cfg_oas_priority;
+	if (pri == 0)
+		pri = phba->cfg_XLanePriority;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
+			"priority 0x%x with oas state %d\n",
+			wwn_to_u64(phba->cfg_oas_vpt_wwpn),
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
+			pri, phba->cfg_oas_lun_state);
+
+	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
+				       phba->cfg_oas_tgt_wwpn, scsi_lun,
+				       phba->cfg_oas_lun_state, pri);
+	if (rc)
+		return rc;
+
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
+		   lpfc_oas_lun_show, lpfc_oas_lun_store);
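+
+/*
+ * Editor's usage sketch for the OAS (xlane) attributes above (hostN and
+ * the WWPNs are placeholders; the port must support OAS, phba->cfg_fof):
+ *
+ *   # echo "0x2000000000000001" > /sys/class/scsi_host/hostN/lpfc_xlane_vpt
+ *   # echo "0x2000000000000002" > /sys/class/scsi_host/hostN/lpfc_xlane_tgt
+ *   # echo 1 > /sys/class/scsi_host/hostN/lpfc_xlane_lun_state
+ *   # echo "0x0" > /sys/class/scsi_host/hostN/lpfc_xlane_lun
+ *
+ * Reading lpfc_xlane_lun afterwards walks the OAS-enabled luns for the
+ * selected vport/target, returning one lun per read.
+ */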
+
+int lpfc_enable_nvmet_cnt;
+unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
+MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
+
+static int lpfc_poll = 0;
+module_param(lpfc_poll, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
+		 " 0 - none,"
+		 " 1 - poll with interrupts enabled"
+		 " 3 - poll and disable FCP ring interrupts");
+
+static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
+		   lpfc_poll_show, lpfc_poll_store);
+
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
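+
+/*
+ * Editor's note: module parameters such as the arrays above are supplied
+ * at load time, e.g. (the WWPN is a placeholder):
+ *
+ *   # modprobe lpfc lpfc_poll=1 lpfc_no_hba_reset=0x10000000c9abcdef
+ *
+ * module_param_array() stores the number of comma-separated values it
+ * parsed in lpfc_no_hba_reset_cnt.
+ */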
+
+LPFC_ATTR(sli_mode, 0, 0, 3,
+	"SLI mode selector:"
+	" 0 - auto (SLI-3 if supported),"
+	" 2 - select SLI-2 even on SLI-3 capable HBAs,"
+	" 3 - select SLI-3");
+
+LPFC_ATTR_R(enable_npiv, 1, 0, 1,
+	"Enable NPIV functionality");
+
+LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
+	"FCF Fast failover=1 Priority failover=2");
+
+/*
+# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
+#	0x0 = disabled, XRI/OXID use not tracked.
+#	0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
+#	0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
+*/
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+	"Enable RRQ functionality");
+
+/*
+# lpfc_suppress_link_up:  Bring link up at initialization
+#            0x0  = bring link up (issue MBX_INIT_LINK)
+#            0x1  = do NOT bring link up at initialization (MBX_INIT_LINK)
+#            0x2  = never bring up link
+# Default value is 0.
+*/
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+		LPFC_DELAY_INIT_LINK_INDEFINITELY,
+		"Suppress Link Up at initialization");
+/*
+# lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
+#       1 - (1024)
+#       2 - (2048)
+#       3 - (3072)
+#       4 - (4096)
+#       5 - (5120)
+*/
+static ssize_t
+lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+}
+
+static DEVICE_ATTR(iocb_hw, S_IRUGO,
+			 lpfc_iocb_hw_show, NULL);
+static ssize_t
+lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pring ? pring->txq_max : 0);
+}
+
+static DEVICE_ATTR(txq_hw, S_IRUGO,
+			 lpfc_txq_hw_show, NULL);
+static ssize_t
+lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pring ? pring->txcmplq_max : 0);
+}
+
+static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
+			 lpfc_txcmplq_hw_show, NULL);
+
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
+	"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
+
+/*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+*/
+static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
+module_param(lpfc_nodev_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_nodev_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+
+/**
+ * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the dev loss timeout in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
+}
+
+/**
+ * lpfc_nodev_tmo_init - Set the hba nodev timeout value
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the nodev timeout value.
+ *
+ * Description:
+ * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
+ * a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
+{
+	if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
+		if (val != LPFC_DEF_DEVLOSS_TMO)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0407 Ignoring lpfc_nodev_tmo module "
+					 "parameter because lpfc_devloss_tmo "
+					 "is set.\n");
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = val;
+		vport->cfg_devloss_tmo = val;
+		return 0;
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "0400 lpfc_nodev_tmo attribute cannot be set to"
+			 " %d, allowed range is [%d, %d]\n",
+			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+	return -EINVAL;
+}
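+
+/*
+ * Editor's example of the precedence implemented above: loading with
+ * lpfc_devloss_tmo=60 makes this init routine ignore any lpfc_nodev_tmo
+ * setting and use 60 for both timers; with devloss left at its default,
+ * lpfc_nodev_tmo=45 sets both timers to 45.
+ */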
+
+/**
+ * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
+ * @vport: lpfc vport structure pointer.
+ *
+ * Description:
+ * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
+ **/
+static void
+lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_nodelist  *ndlp;
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->rport)
+			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+	}
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If the devloss tmo is already set or the vport dev loss tmo has changed
+ * then a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
+{
+	if (vport->dev_loss_tmo_changed ||
+	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0401 Ignoring change to lpfc_nodev_tmo "
+				 "because lpfc_devloss_tmo is set.\n");
+		return 0;
+	}
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = val;
+		vport->cfg_devloss_tmo = val;
+		/*
+		 * For compat: set the fc_host dev loss so new rports
+		 * will get the value.
+		 */
+		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+		lpfc_update_rport_devloss_tmo(vport);
+		return 0;
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "0403 lpfc_nodev_tmo attribute cannot be set to "
+			 "%d, allowed range is [%d, %d]\n",
+			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_vport_param_store(nodev_tmo)
+
+static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
+		   lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+
+/*
+# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+# disappear until the timer expires. Value range is [0,255]. Default
+# value is 30.
+*/
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_devloss_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
+		      LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
+lpfc_vport_param_show(devloss_tmo)
+
+/**
+ * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If val is in a valid range then set the vport nodev tmo,
+ * devloss tmo, also set the vport dev loss tmo changed flag.
+ * Else a kernel error message is printed.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
+{
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = val;
+		vport->cfg_devloss_tmo = val;
+		vport->dev_loss_tmo_changed = 1;
+		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+		lpfc_update_rport_devloss_tmo(vport);
+		return 0;
+	}
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "0404 lpfc_devloss_tmo attribute cannot be set to "
+			 "%d, allowed range is [%d, %d]\n",
+			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_vport_param_store(devloss_tmo)
+static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
+		   lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
+
+/*
+ * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
+ * lpfc_suppress_rsp = 0  Disable
+ * lpfc_suppress_rsp = 1  Enable (default)
+ *
+ */
+LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
+	    "Enable suppress rsp feature is firmware supports it");
+
+/*
+ * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 1  use a single RQ pair
+ * lpfc_nvmet_mrq >= 2  use specified RQ pairs for MRQ
+ *
+ */
+LPFC_ATTR_R(nvmet_mrq,
+	    1, 1, 16,
+	    "Specify number of RQ pairs for processing NVMET cmds");
+
+/*
+ * lpfc_enable_fc4_type: Defines what FC4 types are supported.
+ * Supported Values:  1 - register just FCP
+ *                    3 - register both FCP and NVME
+ * Supported values are [1,3]. Default value is 1
+ */
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
+	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+	    "Define fc4 type to register with fabric.");
+
+/*
+ * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
+ * This parameter is only used if:
+ *     lpfc_enable_fc4_type is 3 - register both FCP and NVME and
+ *     port is not configured for NVMET.
+ *
+ * ELS/CT always get 10% of XRIs, up to a maximum of 250
+ * The remaining XRIs get split up based on lpfc_xri_split per port:
+ *
+ * Supported Values are in percentages
+ * the xri_split value is the percentage the SCSI port will get. The remaining
+ * percentage will go to NVME.
+ */
+LPFC_ATTR_R(xri_split, 50, 10, 90,
+	    "Division of XRI resources between SCSI and NVME");
+
+/*
+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
+# deluged with LOTS of information.
+# You can set a bit mask to record specific types of verbose messages:
+# See lpfc_logmsh.h for definitions.
+*/
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
+		       "Verbose logging bit-mask");
+
+/*
+# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
+# objects that have been registered with the nameserver after login.
+*/
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
+		  "Deregister nameserver objects before LOGO");
+
+/*
+# lun_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per FCP LUN. Value range is [1,512]. Default value is 30.
+# If this parameter value is greater than 1/8th the maximum number of exchanges
+# supported by the HBA port, then the lun queue depth will be reduced to
+# 1/8th the maximum number of exchanges.
+*/
+LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
+		  "Max number of FCP commands we can queue to a specific LUN");
+
+/*
+# tgt_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per target port. Value range is [10,65535]. Default value is 65535.
+*/
+LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
+		  "Max number of FCP commands we can queue to a specific target port");
+
+/*
+# hba_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per lpfc HBA. Value range is [32,8192]. If this parameter
+# value is greater than the maximum number of exchanges supported by the HBA,
+# then maximum number of exchanges supported by the HBA is used to determine
+# the hba_queue_depth.
+*/
+LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
+	    "Max number of FCP commands we can queue to a lpfc HBA");
+
+/*
+# peer_port_login:  This parameter allows/prevents logins
+# between peer ports hosted on the same physical port.
+# When this parameter is set 0 peer ports of same physical port
+# are not allowed to login to each other.
+# When this parameter is set 1 peer ports of same physical port
+# are allowed to login to each other.
+# Default value of this parameter is 0.
+*/
+LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
+		  "Allow peer ports on the same physical port to login to each "
+		  "other.");
+
+/*
+# restrict_login:  This parameter allows/prevents logins
+# between Virtual Ports and remote initiators.
+# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
+# other initiators and will attempt to PLOGI all remote ports.
+# When this parameter is set (1) Virtual Ports will reject PLOGIs from
+# remote ports and will not attempt to PLOGI to other initiators.
+# This parameter does not restrict to the physical port.
+# This parameter does not restrict logins to Fabric resident remote ports.
+# Default value of this parameter is 1.
+*/
+static int lpfc_restrict_login = 1;
+module_param(lpfc_restrict_login, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_restrict_login,
+		 "Restrict virtual ports login to remote initiators.");
+lpfc_vport_param_show(restrict_login);
+
+/**
+ * lpfc_restrict_login_init - Set the vport restrict login flag
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical clear the restrict login flag and return.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
+{
+	if (val < 0 || val > 1) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0422 lpfc_restrict_login attribute cannot "
+				 "be set to %d, allowed range is [0, 1]\n",
+				 val);
+		vport->cfg_restrict_login = 1;
+		return -EINVAL;
+	}
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		vport->cfg_restrict_login = 0;
+		return 0;
+	}
+	vport->cfg_restrict_login = val;
+	return 0;
+}
+
+/**
+ * lpfc_restrict_login_set - Set the vport restrict login flag
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical and the val is not zero log a kernel
+ * error message, clear the restrict login flag and return zero.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
+{
+	if (val < 0 || val > 1) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0425 lpfc_restrict_login attribute cannot "
+				 "be set to %d, allowed range is [0, 1]\n",
+				 val);
+		vport->cfg_restrict_login = 1;
+		return -EINVAL;
+	}
+	if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0468 lpfc_restrict_login must be 0 for "
+				 "Physical ports.\n");
+		vport->cfg_restrict_login = 0;
+		return 0;
+	}
+	vport->cfg_restrict_login = val;
+	return 0;
+}
+lpfc_vport_param_store(restrict_login);
+static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
+		   lpfc_restrict_login_show, lpfc_restrict_login_store);
+
+/*
+# Some disk devices have a "select ID" or "select Target" capability.
+# From a protocol standpoint "select ID" usually means select the
+# Fibre channel "ALPA".  In the FC-AL Profile there is an "informative
+# annex" which contains a table that maps a "select ID" (a number
+# between 0 and 7F) to an ALPA.  By default, for compatibility with
+# older drivers, the lpfc driver scans this table from low ALPA to high
+# ALPA.
+#
+# Turning on the scan-down variable (on = 1, off = 0) will
+# cause the lpfc driver to use an inverted table, effectively
+# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
+#
+# (Note: This "select ID" functionality is a LOOP ONLY characteristic
+# and will not work across a fabric. Also this parameter will take
+# effect only in the case when ALPA map is not available.)
+*/
+LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
+		  "Start scanning for devices from highest ALPA to lowest");
+
+/*
+# lpfc_topology:  link topology for init link
+#            0x0  = attempt loop mode then point-to-point
+#            0x01 = internal loopback mode
+#            0x02 = attempt point-to-point mode only
+#            0x04 = attempt loop mode only
+#            0x06 = attempt point-to-point mode then loop
+# Set point-to-point mode if you want to run as an N_Port.
+# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
+# Default value is 0.
+*/
+LPFC_ATTR(topology, 0, 0, 6,
+	"Select Fibre Channel topology");
+
+/**
+ * lpfc_topology_store - Set the adapter's topology field
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer containing the topology value in decimal, optionally
+ *       prefixed with "nolip " to skip the lip.
+ * @count: size of the data buffer.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's topology field and
+ * issue a lip; if the lip fails reset the topology to the old value.
+ *
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * length of buf if val is in range and the lip (when issued) succeeds
+ * -EINVAL if val is out of range or the lip fails
+ **/
+static ssize_t
+lpfc_topology_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val = 0;
+	int nolip = 0;
+	const char *val_buf = buf;
+	int err;
+	uint32_t prev_val;
+
+	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
+		nolip = 1;
+		val_buf = &buf[strlen("nolip ")];
+	}
+
+	if (!isdigit(val_buf[0]))
+		return -EINVAL;
+	if (sscanf(val_buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if (val >= 0 && val <= 6) {
+		prev_val = phba->cfg_topology;
+		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+			val == 4) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"3113 Loop mode not supported at speed %d\n",
+				val);
+			return -EINVAL;
+		}
+		if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+			val == 4) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"3114 Loop mode not supported\n");
+			return -EINVAL;
+		}
+		phba->cfg_topology = val;
+		if (nolip)
+			return strlen(buf);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			"3054 lpfc_topology changed from %d to %d\n",
+			prev_val, val);
+		if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+			phba->fc_topology_changed = 1;
+		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+		if (err) {
+			phba->cfg_topology = prev_val;
+			return -EINVAL;
+		} else
+			return strlen(buf);
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		"%d:0467 lpfc_topology attribute cannot be set to %d, "
+		"allowed range is [0, 6]\n",
+		phba->brd_no, val);
+	return -EINVAL;
+}
+
+lpfc_param_show(topology)
+static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
+		lpfc_topology_show, lpfc_topology_store);
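+
+/*
+ * Editor's usage sketch (hostN is a placeholder): the store routine accepts
+ * an optional "nolip " prefix to change the topology without issuing a lip:
+ *
+ *   # echo 2 > /sys/class/scsi_host/hostN/lpfc_topology
+ *   # echo "nolip 2" > /sys/class/scsi_host/hostN/lpfc_topology
+ */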
+
+/**
+ * lpfc_static_vport_show - Read callback function for
+ *   lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file reports whether the vport is a static vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->vport_flag & STATIC_VPORT)
+		sprintf(buf, "1\n");
+	else
+		sprintf(buf, "0\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to report whether a vport is a static vport.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+		   lpfc_static_vport_show, NULL);
+
+/**
+ * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
+ * @dev: Pointer to class device.
+ * @attr: device attribute, not used.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
+ *
+ * This function gets called when a user writes to the lpfc_stat_data_ctrl
+ * sysfs file. This function parses the command written to the sysfs file
+ * and takes appropriate action. These commands are used for controlling
+ * driver statistical data collection.
+ * Following are the commands this function handles.
+ *
+ *    setbucket <bucket_type> <base> <step>
+ *			       = Set the latency buckets.
+ *    destroybucket            = destroy all the buckets.
+ *    start                    = start data collection
+ *    stop                     = stop data collection
+ *    reset                    = reset the collected data
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+#define LPFC_MAX_DATA_CTRL_LEN 1024
+	static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
+	unsigned long i;
+	char *str_ptr, *token;
+	struct lpfc_vport **vports;
+	struct Scsi_Host *v_shost;
+	char *bucket_type_str, *base_str, *step_str;
+	unsigned long base, step, bucket_type;
+
+	if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
+		if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
+			return -EINVAL;
+
+		strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
+		str_ptr = &bucket_data[0];
+		/* Ignore this token - this is command token */
+		token = strsep(&str_ptr, "\t ");
+		if (!token)
+			return -EINVAL;
+
+		bucket_type_str = strsep(&str_ptr, "\t ");
+		if (!bucket_type_str)
+			return -EINVAL;
+
+		if (!strncmp(bucket_type_str, "linear", strlen("linear")))
+			bucket_type = LPFC_LINEAR_BUCKET;
+		else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
+			bucket_type = LPFC_POWER2_BUCKET;
+		else
+			return -EINVAL;
+
+		base_str = strsep(&str_ptr, "\t ");
+		if (!base_str)
+			return -EINVAL;
+		base = simple_strtoul(base_str, NULL, 0);
+
+		step_str = strsep(&str_ptr, "\t ");
+		if (!step_str)
+			return -EINVAL;
+		step = simple_strtoul(step_str, NULL, 0);
+		if (!step)
+			return -EINVAL;
+
+		/* Block the data collection for every vport */
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports == NULL)
+			return -ENOMEM;
+
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			v_shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(v_shost->host_lock);
+			/* Block and reset data collection */
+			vports[i]->stat_data_blocked = 1;
+			if (vports[i]->stat_data_enabled)
+				lpfc_vport_reset_stat_data(vports[i]);
+			spin_unlock_irq(v_shost->host_lock);
+		}
+
+		/* Set the bucket attributes */
+		phba->bucket_type = bucket_type;
+		phba->bucket_base = base;
+		phba->bucket_step = step;
+
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			v_shost = lpfc_shost_from_vport(vports[i]);
+
+			/* Unblock data collection */
+			spin_lock_irq(v_shost->host_lock);
+			vports[i]->stat_data_blocked = 0;
+			spin_unlock_irq(v_shost->host_lock);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports == NULL)
+			return -ENOMEM;
+
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			v_shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(v_shost->host_lock);
+			vports[i]->stat_data_blocked = 1;
+			lpfc_free_bucket(vports[i]);
+			vports[i]->stat_data_enabled = 0;
+			vports[i]->stat_data_blocked = 0;
+			spin_unlock_irq(v_shost->host_lock);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+		phba->bucket_type = LPFC_NO_BUCKET;
+		phba->bucket_base = 0;
+		phba->bucket_step = 0;
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "start", strlen("start"))) {
+		/* If no buckets configured return error */
+		if (phba->bucket_type == LPFC_NO_BUCKET)
+			return -EINVAL;
+		spin_lock_irq(shost->host_lock);
+		if (vport->stat_data_enabled) {
+			spin_unlock_irq(shost->host_lock);
+			return strlen(buf);
+		}
+		lpfc_alloc_bucket(vport);
+		vport->stat_data_enabled = 1;
+		spin_unlock_irq(shost->host_lock);
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "stop", strlen("stop"))) {
+		spin_lock_irq(shost->host_lock);
+		if (vport->stat_data_enabled == 0) {
+			spin_unlock_irq(shost->host_lock);
+			return strlen(buf);
+		}
+		lpfc_free_bucket(vport);
+		vport->stat_data_enabled = 0;
+		spin_unlock_irq(shost->host_lock);
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "reset", strlen("reset"))) {
+		if ((phba->bucket_type == LPFC_NO_BUCKET)
+			|| !vport->stat_data_enabled)
+			return strlen(buf);
+		spin_lock_irq(shost->host_lock);
+		vport->stat_data_blocked = 1;
+		lpfc_vport_reset_stat_data(vport);
+		vport->stat_data_blocked = 0;
+		spin_unlock_irq(shost->host_lock);
+		return strlen(buf);
+	}
+	return -EINVAL;
+}
+
+
+/**
+ * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
+ * @dev: Pointer to class device object.
+ * @attr: device attribute, not used.
+ * @buf: Data buffer.
+ *
+ * This function is the read call back function for the
+ * lpfc_stat_data_ctrl sysfs file. This function reports the
+ * current statistical data collection state.
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int index = 0;
+	int i;
+	char *bucket_type;
+	unsigned long bucket_value;
+
+	switch (phba->bucket_type) {
+	case LPFC_LINEAR_BUCKET:
+		bucket_type = "linear";
+		break;
+	case LPFC_POWER2_BUCKET:
+		bucket_type = "power2";
+		break;
+	default:
+		bucket_type = "No Bucket";
+		break;
+	}
+
+	sprintf(&buf[index], "Statistical Data enabled :%d, "
+		"blocked :%d, Bucket type :%s, Bucket base :%d,"
+		" Bucket step :%d\nLatency Ranges :",
+		vport->stat_data_enabled, vport->stat_data_blocked,
+		bucket_type, phba->bucket_base, phba->bucket_step);
+	index = strlen(buf);
+	if (phba->bucket_type != LPFC_NO_BUCKET) {
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+			if (phba->bucket_type == LPFC_LINEAR_BUCKET)
+				bucket_value = phba->bucket_base +
+					phba->bucket_step * i;
+			else
+				bucket_value = phba->bucket_base +
+				(1 << i) * phba->bucket_step;
+
+			if (index + 10 > PAGE_SIZE)
+				break;
+			sprintf(&buf[index], "%08ld ", bucket_value);
+			index = strlen(buf);
+		}
+	}
+	sprintf(&buf[index], "\n");
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
+		   lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
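+
+/*
+ * Editor's usage sketch (hostN is a placeholder): configure linear latency
+ * buckets with base 0 and step 10, then start collection:
+ *
+ *   # echo "setbucket linear 0 10" > \
+ *       /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
+ *   # echo "start" > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
+ */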
+
+/*
+ * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
+ */
+
+/*
+ * Each bucket takes 11 characters; each target additionally takes a
+ * 17-byte WWN prefix and 1 newline.
+ */
+#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
+#define MAX_STAT_DATA_SIZE_PER_TARGET \
+	STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
+
+
+/**
+ * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
+ * @filp: sysfs file
+ * @kobj: Pointer to the kernel object
+ * @bin_attr: Attribute object
+ * @buf: Buffer pointer
+ * @off: File offset
+ * @count: Buffer size
+ *
+ * This function is the read callback for the lpfc_drvr_stat_data
+ * sysfs file. It exports the per-target statistical data to user
+ * applications.
+ **/
+static ssize_t
+sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device,
+		kobj);
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int i = 0, index = 0;
+	unsigned long nport_index;
+	struct lpfc_nodelist *ndlp = NULL;
+	nport_index = (unsigned long)off /
+		MAX_STAT_DATA_SIZE_PER_TARGET;
+
+	if (!vport->stat_data_enabled || vport->stat_data_blocked
+		|| (phba->bucket_type == LPFC_NO_BUCKET))
+		return 0;
+
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+			continue;
+
+		if (nport_index > 0) {
+			nport_index--;
+			continue;
+		}
+
+		if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
+			> count)
+			break;
+
+		if (!ndlp->lat_data)
+			continue;
+
+		/* Print the WWN */
+		sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
+			ndlp->nlp_portname.u.wwn[0],
+			ndlp->nlp_portname.u.wwn[1],
+			ndlp->nlp_portname.u.wwn[2],
+			ndlp->nlp_portname.u.wwn[3],
+			ndlp->nlp_portname.u.wwn[4],
+			ndlp->nlp_portname.u.wwn[5],
+			ndlp->nlp_portname.u.wwn[6],
+			ndlp->nlp_portname.u.wwn[7]);
+
+		index = strlen(buf);
+
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+			sprintf(&buf[index], "%010u,",
+				ndlp->lat_data[i].cmd_count);
+			index = strlen(buf);
+		}
+		sprintf(&buf[index], "\n");
+		index = strlen(buf);
+	}
+	spin_unlock_irq(shost->host_lock);
+	return index;
+}
+
+static struct bin_attribute sysfs_drvr_stat_data_attr = {
+	.attr = {
+		.name = "lpfc_drvr_stat_data",
+		.mode = S_IRUSR,
+	},
+	.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
+	.read = sysfs_drvr_stat_data_read,
+	.write = NULL,
+};
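+
+/*
+ * Illustrative output (derived from the format strings in
+ * sysfs_drvr_stat_data_read(); the WWPN and counts below are examples
+ * only): each line is one target, a 16-digit WWPN and one zero-padded
+ * 10-digit command count per latency bucket.
+ *
+ *   10000090fa8d4c2e:0000000042,0000000007,0000000000,...
+ */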
+
+/*
+# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
+# connection.
+# Value range is [0,32]. Default value is 0.
+*/
+/**
+ * lpfc_link_speed_store - Set the adapter's link speed
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the link speed value, optionally prefixed
+ *       with "nolip ".
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field and
+ * issue a lip; if the lip fails reset the link speed to the old value.
+ * A "nolip " prefix updates the configured speed without issuing a lip.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * length of buf if val is in range and the lip (when issued) succeeds.
+ * -EINVAL if val is out of range or the lip fails.
+ **/
+static ssize_t
+lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val = LPFC_USER_LINK_SPEED_AUTO;
+	int nolip = 0;
+	const char *val_buf = buf;
+	int err;
+	uint32_t prev_val, if_type;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
+	    phba->hba_flag & HBA_FORCED_LINK_SPEED)
+		return -EPERM;
+
+	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
+		nolip = 1;
+		val_buf = &buf[strlen("nolip ")];
+	}
+
+	if (!isdigit(val_buf[0]))
+		return -EINVAL;
+	if (sscanf(val_buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+		"3055 lpfc_link_speed changed from %d to %d %s\n",
+		phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
+
+	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2879 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported by this port.\n",
+				val);
+		return -EINVAL;
+	}
+	if (val == LPFC_USER_LINK_SPEED_16G &&
+		 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3112 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported in loop mode.\n",
+				val);
+		return -EINVAL;
+	}
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
+		prev_val = phba->cfg_link_speed;
+		phba->cfg_link_speed = val;
+		if (nolip)
+			return strlen(buf);
+
+		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+		if (err) {
+			phba->cfg_link_speed = prev_val;
+			return -EINVAL;
+		} else
+			return strlen(buf);
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		"0469 lpfc_link_speed attribute cannot be set to %d, "
+		"allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
+	return -EINVAL;
+}
+
+static int lpfc_link_speed = 0;
+module_param(lpfc_link_speed, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
+lpfc_param_show(link_speed)
+
+/**
+ * lpfc_link_speed_init - Set the adapters link speed
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, clear the link
+ * speed and return an error.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_link_speed_init(struct lpfc_hba *phba, int val)
+{
+	if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3111 lpfc_link_speed of %d cannot "
+			"support loop mode, setting topology to default.\n",
+			 val);
+		phba->cfg_topology = 0;
+	}
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
+		phba->cfg_link_speed = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0405 lpfc_link_speed attribute cannot "
+			"be set to %d, allowed values are "
+			"["LPFC_LINK_SPEED_STRING"]\n", val);
+	phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
+		   lpfc_link_speed_show, lpfc_link_speed_store);
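+
+/*
+ * Illustrative usage (not part of the driver): lpfc_link_speed_store()
+ * accepts an optional "nolip " prefix, which changes the configured
+ * speed without issuing a lip; "host0" is an example host name.
+ *
+ *   echo 8 > /sys/class/scsi_host/host0/lpfc_link_speed        # 8G, lip
+ *   echo "nolip 16" > /sys/class/scsi_host/host0/lpfc_link_speed
+ */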
+
+/*
+# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
+#       0  = aer disabled or not supported
+#       1  = aer supported and enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR(aer_support, 1, 0, 1,
+	"Enable PCIe device AER support");
+lpfc_param_show(aer_support)
+
+/**
+ * lpfc_aer_support_store - Set the adapter for aer support
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the enable (1) or disable (0) aer flag.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the val is 1 and currently the device's AER capability was not
+ * enabled, invoke the kernel's enable AER helper routine, trying to
+ * enable the device's AER capability. If the helper routine enabling
+ * AER returns success, update the device's cfg_aer_support flag to
+ * indicate AER is supported by the device; otherwise, if the device
+ * AER capability is already enabled to support AER, then do nothing.
+ *
+ * If the val is 0 and currently the device's AER support was enabled,
+ * invoke the kernel's disable AER helper routine. After that, update
+ * the device's cfg_aer_support flag to indicate AER is not supported
+ * by the device; otherwise, if the device AER capability is already
+ * disabled from supporting AER, then do nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val = 0, rc = -EINVAL;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		if (phba->hba_flag & HBA_AER_ENABLED) {
+			rc = pci_disable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag &= ~HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 0;
+				rc = strlen(buf);
+			} else
+				rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 0;
+			rc = strlen(buf);
+		}
+		break;
+	case 1:
+		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
+			rc = pci_enable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag |= HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 1;
+				rc = strlen(buf);
+			} else
+				 rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 1;
+			rc = strlen(buf);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
+		   lpfc_aer_support_show, lpfc_aer_support_store);
+
+/**
+ * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing flag 1 for aer cleanup state.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the @buf contains 1 and the device currently has the AER support
+ * enabled, then invokes the kernel AER helper routine
+ * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * error status register.
+ *
+ * Returns:
+ * length of buf on success.
+ * -EINVAL if buf does not contain 1.
+ * -EPERM if AER is not currently enabled or the cleanup fails.
+ **/
+static ssize_t
+lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val, rc = -1;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+
+	if (rc == 0)
+		return strlen(buf);
+	else
+		return -EPERM;
+}
+
+static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
+		   lpfc_aer_cleanup_state);
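+
+/*
+ * Illustrative usage (not part of the driver): enable AER support and
+ * then clear any latched uncorrectable error status; "host0" is an
+ * example host name.
+ *
+ *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_support
+ *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_state_cleanup
+ */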
+
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this api is called through sysfs, the driver shall
+ * try to enable or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual function has been enabled to the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, the maximum virtual function number will
+ * be used for invoking the api; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If a nonzero number of virtual functions is already enabled on the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EEXIST will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and a nonzero number of virtual functions
+ * has been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * virtual function disable api shall be invoked to disable all the
+ * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual function has been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct pci_dev *pdev = phba->pcidev;
+	int val = 0, rc = -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val < 0)
+		return -EINVAL;
+
+	/* Request disabling virtual functions */
+	if (val == 0) {
+		if (phba->cfg_sriov_nr_virtfn > 0) {
+			pci_disable_sriov(pdev);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+		return strlen(buf);
+	}
+
+	/* Request enabling virtual functions */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3018 There are %d virtual functions "
+				"enabled on physical function.\n",
+				phba->cfg_sriov_nr_virtfn);
+		return -EEXIST;
+	}
+
+	if (val <= LPFC_MAX_VFN_PER_PFN)
+		phba->cfg_sriov_nr_virtfn = val;
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3019 Enabling %d virtual functions is not "
+				"allowed.\n", val);
+		return -EINVAL;
+	}
+
+	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+	if (rc) {
+		phba->cfg_sriov_nr_virtfn = 0;
+		rc = -EPERM;
+	} else
+		rc = strlen(buf);
+
+	return rc;
+}
+
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+	"Enable PCIe device SR-IOV virtual fn");
+
+lpfc_param_show(sriov_nr_virtfn)
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+		   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
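+
+/*
+ * Illustrative usage (not part of the driver): enable two SR-IOV virtual
+ * functions, then disable them again; requesting a nonzero count while
+ * VFs are already enabled returns -EEXIST, per the store routine above.
+ *
+ *   echo 2 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
+ *   echo 0 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
+ */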
+
+/**
+ * lpfc_request_firmware_upgrade_store - Request a Linux generic firmware
+ * upgrade
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the flag 1 to request the upgrade.
+ * @count: unused variable.
+ *
+ * Description:
+ * When buf contains 1, request a firmware upgrade through
+ * lpfc_sli4_request_firmware_update().
+ *
+ * Returns:
+ * length of the buf on success.
+ * -EINVAL if buf does not contain 1.
+ * -EPERM if the firmware update request fails.
+ **/
+static ssize_t
+lpfc_request_firmware_upgrade_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val = 0, rc = -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
+	if (rc)
+		rc = -EPERM;
+	else
+		rc = strlen(buf);
+	return rc;
+}
+
+static int lpfc_req_fw_upgrade;
+module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
+lpfc_param_show(request_firmware_upgrade)
+
+/**
+ * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
+ * @phba: lpfc_hba pointer.
+ * @val: 0 or 1.
+ *
+ * Description:
+ * Set the initial Linux generic firmware upgrade enable or disable flag.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
+{
+	if (val >= 0 && val <= 1) {
+		phba->cfg_request_firmware_upgrade = val;
+		return 0;
+	}
+	return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
+		   lpfc_request_firmware_upgrade_show,
+		   lpfc_request_firmware_upgrade_store);
+
+/**
+ * lpfc_fcp_imax_store - Set the maximum FCP interrupt rate for the HBA
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the number of fast-path FCP interrupts per second.
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is 0 or in the valid range [5000,5000000], then set the adapter's
+ * maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * length of the buf on success if val is in range.
+ * -EINVAL if val is out of range or the adapter is not SLI4.
+ **/
+static ssize_t
+lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val = 0, i;
+
+	/* fcp_imax is only valid for SLI4 */
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	/*
+	 * Value range for the HBA is [5000,5000000]
+	 * The value for each EQ depends on how many EQs are configured.
+	 * Allow value == 0
+	 */
+	if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
+		return -EINVAL;
+
+	phba->cfg_fcp_imax = (uint32_t)val;
+	phba->initial_imax = phba->cfg_fcp_imax;
+
+	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 val);
+
+	return strlen(buf);
+}
+
+/*
+# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+# for the HBA.
+#
+# Value range is [5,000 to 5,000,000]. Default value is 50,000.
+*/
+static int lpfc_fcp_imax = LPFC_DEF_IMAX;
+module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_imax,
+	    "Set the maximum number of FCP interrupts per second per HBA");
+lpfc_param_show(fcp_imax)
+
+/**
+ * lpfc_fcp_imax_init - Set the initial FCP interrupt rate limit
+ * @phba: lpfc_hba pointer.
+ * @val: fcp_imax value.
+ *
+ * Description:
+ * If val is 0 or in the valid range [5000,5000000], then initialize the
+ * adapter's maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * zero; out-of-range values are logged and replaced with the default.
+ **/
+static int
+lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
+{
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		phba->cfg_fcp_imax = 0;
+		return 0;
+	}
+
+	if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
+	    (val == 0)) {
+		phba->cfg_fcp_imax = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3016 lpfc_fcp_imax: %d out of range, using default\n",
+			val);
+	phba->cfg_fcp_imax = LPFC_DEF_IMAX;
+
+	return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
+		   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
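+
+/*
+ * Illustrative usage (not part of the driver): cap fast-path FCP
+ * interrupts at 100000 per second, or write 0 to disable the cap;
+ * nonzero values outside [5000,5000000] are rejected by the store
+ * routine above.
+ *
+ *   echo 100000 > /sys/class/scsi_host/host0/lpfc_fcp_imax
+ *   echo 0 > /sys/class/scsi_host/host0/lpfc_fcp_imax
+ */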
+
+/*
+ * lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
+ *       0       No auto_imax support
+ *       1       auto imax on
+ * Auto imax will change the value of fcp_imax on a per EQ basis, using
+ * the EQ Delay Multiplier, depending on the activity for that EQ.
+ * Value range [0,1]. Default value is 1.
+ */
+LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+
+/**
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the CPU to vector mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_vector_map_info *cpup;
+	int  len = 0;
+
+	if ((phba->sli_rev != LPFC_SLI_REV4) ||
+	    (phba->intr_type != MSIX))
+		return len;
+
+	switch (phba->cfg_fcp_cpu_map) {
+	case 0:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: No mapping (%d)\n",
+				phba->cfg_fcp_cpu_map);
+		return len;
+	case 1:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: HBA centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	case 2:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: Driver centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	}
+
+	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+		/* the PAGE_SIZE - 64 margin checked below leaves room
+		 * for this entry and the "more..." truncation message
+		 */
+		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d\n",
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
+					cpup->core_id);
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d IRQ %d\n",
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
+					cpup->core_id, cpup->irq);
+
+		phba->sli4_hba.curr_disp_cpu++;
+
+		/* display max number of CPUs keeping some margin */
+		if (phba->sli4_hba.curr_disp_cpu <
+				phba->sli4_hba.num_present_cpu &&
+				(len >= (PAGE_SIZE - 64))) {
+			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			break;
+		}
+	}
+
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+		phba->sli4_hba.curr_disp_cpu = 0;
+
+	return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: data buffer, not used (store is not implemented).
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	int status = -EINVAL;
+	return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#	0 - Do not affinitize IRQ vectors
+#	1 - Affinitize HBA vectors with respect to each HBA
+#	    (start with CPU0 for each HBA)
+#	2 - Affinitize HBA vectors with respect to the entire driver
+#	    (round robin through all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+		 "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: cpu map mode value.
+ *
+ * Description:
+ * If val is in the valid range [0-2], then affinitize the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero; out-of-range values are logged and replaced with the default
+ * mapping.
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		phba->cfg_fcp_cpu_map = 0;
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+		phba->cfg_fcp_cpu_map = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3326 lpfc_fcp_cpu_map: %d out of range, using "
+			"default\n", val);
+	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+	return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+		   lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
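+
+/*
+ * Illustrative output (derived from the format strings in
+ * lpfc_fcp_cpu_map_show(); CPU counts, IDs and IRQ numbers below are
+ * examples only):
+ *
+ *   fcp_cpu_map: Driver centric mapping (2): 8 online CPUs
+ *   CPU 00 io_chan 00 physid 0 coreid 0 IRQ 50
+ *   CPU 01 io_chan 01 physid 0 coreid 1 IRQ 51
+ */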
+
+/*
+# lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
+# Value range is [2,3]. Default value is 3.
+*/
+LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
+		  "Select Fibre Channel class of service for FCP sequences");
+
+/*
+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
+# is [0,1]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+		   "Use ADISC on rediscovery to authenticate FCP devices");
+
+/*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+		   "First burst size for Targets that support first burst");
+
+/*
+ * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
+ * When the driver is configured as an NVME target, this value is
+ * communicated to the NVME initiator in the PRLI response.  It is
+ * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
+ * parameters are set and the target is sending the PRLI RSP.
+ * Parameter supported on physical port only - no NPIV support.
+ * Value range is [0,65536]. Default value is 0.
+ */
+LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
+	     "NVME Target mode first burst size in 512B increments.");
+
+/*
+ * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
+ * For the Initiator (I), enabling this parameter means that an NVMET
+ * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
+ * processed by the initiator for subsequent NVME FCP IO. For the target
+ * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
+ * driver parameter as the target function's first burst size returned to the
+ * initiator in the target's NVME PRLI response. Parameter supported on physical
+ * port only - no NPIV support.
+ * Value range is [0,1]. Default value is 0 (disabled).
+ */
+LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
+	     "Enable First Burst feature on I and T functions.");
+
+/*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth. When
+# the parameter is set to a non-zero value, the I/O queue depth is controlled
+# to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
+*/
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
+	"Use command completion time to control queue depth");
+
+lpfc_vport_param_show(max_scsicmpl_time);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+
+	if (val == vport->cfg_max_scsicmpl_time)
+		return 0;
+	if ((val < 0) || (val > 60000))
+		return -EINVAL;
+	vport->cfg_max_scsicmpl_time = val;
+
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	}
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
+		   lpfc_max_scsicmpl_time_show,
+		   lpfc_max_scsicmpl_time_store);
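+
+/*
+ * Illustrative usage (not part of the driver): bound I/O completion time
+ * to 10 seconds; note the setter above also resets each active node's
+ * cmd_qdepth back to cfg_tgt_queue_depth.
+ *
+ *   echo 10000 > /sys/class/scsi_host/host0/lpfc_max_scsicmpl_time
+ */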
+
+/*
+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
+# range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
+
+/*
+ * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
+ * range is [0,1]. Default value is 0.
+ * For [0], FCP commands are issued to Work Queues in a round robin fashion.
+ * For [1], FCP commands are issued to a Work Queue associated with the
+ *          current CPU.
+ *
+ * LPFC_FCP_SCHED_ROUND_ROBIN == 0
+ * LPFC_FCP_SCHED_BY_CPU == 1
+ *
+ * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
+ * affinity for FCP/NVME I/Os through Work Queues associated with the current
+ * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
+ * through WQs will be used.
+ */
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
+	     LPFC_FCP_SCHED_ROUND_ROBIN,
+	     LPFC_FCP_SCHED_BY_CPU,
+	     "Determine scheduling algorithm for "
+	     "issuing commands [0] - Round Robin, [1] - Current CPU");
+
+/*
+# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
+# range is [0,1]. Default value is 0.
+# For [0], bus reset issues target reset to ALL devices
+# For [1], bus reset issues target reset to non-FCP2 devices
+*/
+LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
+	     "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
+
+
+/*
+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
+# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
+# cr_delay is set to 0.
+*/
+LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
+		"interrupt response is generated");
+
+LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
+		"interrupt response is generated");
+
+/*
+# lpfc_multi_ring_support:  Determines how many rings to spread available
+# cmd/rsp IOCB entries across.
+# Value range is [1,2]. Default value is 1.
+*/
+LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
+		"SLI rings to spread IOCB entries across");
+
+/*
+# lpfc_multi_ring_rctl:  If lpfc_multi_ring_support is enabled, this
+# identifies what rctl value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
+*/
+LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
+	     255, "Identifies RCTL for additional ring configuration");
+
+/*
+# lpfc_multi_ring_type:  If lpfc_multi_ring_support is enabled, this
+# identifies what type value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
+*/
+LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
+	     255, "Identifies TYPE for additional ring configuration");
+
+/*
+# lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
+#       0  = SmartSAN functionality disabled (default)
+#       1  = SmartSAN functionality enabled
+# This parameter will override the value of lpfc_fdmi_on module parameter.
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
+
+/*
+# lpfc_fdmi_on: Controls FDMI support.
+#       0       No FDMI support (default)
+#       1       Traditional FDMI support
+# Traditional FDMI support means the driver will assume FDMI-2 support;
+# however, if that fails, it will fall back to FDMI-1.
+# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
+# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
+# lpfc_fdmi_on.
+# Value range [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
+
+/*
+# Specifies the maximum number of ELS cmds we can have outstanding (for
+# discovery). Value range is [1,64]. Default value = 32.
+*/
+LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
+		 "during discovery");
+
+/*
+# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
+#    will be scanned by the SCSI midlayer when sequential scanning is
+#    used; and is also the highest LUN ID allowed when the SCSI midlayer
+#    parses REPORT_LUN responses. The lpfc driver has no LUN count or
+#    LUN ID limit, but the SCSI midlayer requires this field for the uses
+#    above. The lpfc driver limits the default value to 255 for two reasons.
+#    As it bounds the sequential scan loop, scanning for thousands of luns
+#    on a target can take minutes of wall clock time.  Additionally,
+#    there are FC targets, such as JBODs, that only recognize 8-bits of
+#    LUN ID. When they receive a value greater than 8 bits, they chop off
+#    the high order bits. In other words, they see LUN IDs 0, 256, 512,
+#    and so on all as LUN ID 0. This causes the linux kernel, which sees
+#    valid responses at each of the LUN IDs, to believe there are multiple
+#    devices present, when in fact, there is only 1.
+#    A customer that is aware of their target behaviors, and the results as
+#    indicated above, is welcome to increase the lpfc_max_luns value.
+#    As mentioned, this value is not used by the lpfc driver, only the
+#    SCSI midlayer.
+# Value range is [0,65535]. Default value is 255.
+# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
+*/
+LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
+
+/*
+# lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
+# Value range is [1,255], default value is 10.
+*/
+LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
+	     "Milliseconds driver will wait between polling FCP ring");
+
+/*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+	     "Maximum time to wait for task management commands to complete");
+/*
+# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
+#		support this feature
+#       0  = MSI disabled
+#       1  = MSI enabled
+#       2  = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
+*/
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+	    "MSI-X (2), if possible");
+
+/*
+ * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
+ *
+ *      0  = NVME OAS disabled
+ *      1  = NVME OAS enabled
+ *
+ * Value range is [0,1]. Default value is 0.
+ */
+LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
+	     "Use OAS bit on NVME IOs");
+
+/*
+ * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
+ * will advertise it supports to the SCSI layer. This also will map to
+ * the number of WQs the driver will create.
+ *
+ *      0    = Configure the number of io channels to the number of active CPUs.
+ *      1,32 = Manually specify how many io channels to use.
+ *
+ * Value range is [0,32]. Default value is 4.
+ */
+LPFC_ATTR_R(fcp_io_channel,
+	    LPFC_FCP_IO_CHAN_DEF,
+	    LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
+	    "Set the number of FCP I/O channels");
+
+/*
+ * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
+ * will advertise it supports to the NVME layer. This also will map to
+ * the number of WQs the driver will create.
+ *
+ * This module parameter is valid when lpfc_enable_fc4_type is set
+ * to support NVME.
+ *
+ * The NVME Layer will try to create this many, plus 1 administrative
+ * hardware queue. The administrative queue will always map to WQ 0.
+ * A hardware IO queue maps (qidx) to a specific driver WQ.
+ *
+ *      0    = Configure the number of io channels to the number of active CPUs.
+ *      1,32 = Manually specify how many io channels to use.
+ *
+ * Value range is [0,32]. Default value is 0.
+ */
+LPFC_ATTR_R(nvme_io_channel,
+	    LPFC_NVME_IO_CHAN_DEF,
+	    LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
+	    "Set the number of NVME I/O channels");
+
+/*
+# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
+#       0  = HBA resets disabled
+#       1  = HBA resets enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+
+/*
+# lpfc_enable_hba_heartbeat: Enable the HBA heartbeat timer.
+#       0  = HBA Heartbeat disabled (default)
+#       1  = HBA Heartbeat enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
+
+/*
+# lpfc_EnableXLane: Enable Express Lane Feature
+#      0x0   Express Lane Feature disabled
+#      0x1   Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature
+#       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
+# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
+#       0  = BlockGuard disabled (default)
+#       1  = BlockGuard enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
+
+/*
+# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
+#       0  = disabled (default)
+#       1  = enabled
+# Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
+*/
+unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
+
+/*
+# lpfc_prot_mask:
+#	- Bit mask of host protection capabilities used to register with the
+#	  SCSI mid-layer
+# 	- Only meaningful if BG is turned on (lpfc_enable_bg=1).
+#	- Allows you to ultimately specify which profiles to use
+#	- Default will result in registering capabilities for all profiles.
+#	- SHOST_DIF_TYPE1_PROTECTION	1
+#		HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
+#	- SHOST_DIX_TYPE0_PROTECTION	8
+#		HBA supports DIX Type 0: Host to HBA protection only
+#	- SHOST_DIX_TYPE1_PROTECTION	16
+#		HBA supports DIX Type 1: Host to HBA  Type 1 protection
+#
+*/
+LPFC_ATTR(prot_mask,
+	(SHOST_DIF_TYPE1_PROTECTION |
+	SHOST_DIX_TYPE0_PROTECTION |
+	SHOST_DIX_TYPE1_PROTECTION),
+	0,
+	(SHOST_DIF_TYPE1_PROTECTION |
+	SHOST_DIX_TYPE0_PROTECTION |
+	SHOST_DIX_TYPE1_PROTECTION),
+	"T10-DIF host protection capabilities mask");
+
+/*
+# lpfc_prot_guard:
+#	- Bit mask of protection guard types to register with the SCSI mid-layer
+#	- Guard types are currently either 1) T10-DIF CRC 2) IP checksum
+#	- Allows you to ultimately specify which profiles to use
+#	- Default will result in registering capabilities for all guard types
+#
+*/
+LPFC_ATTR(prot_guard,
+	SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+	"T10-DIF host protection guard type");
+
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to a non-zero value, initial Nport discovery is
+ * delayed by ra_tov seconds when the Clean Address bit is cleared in the
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * The driver always delays Nport discovery on subsequent FLOGI/FDISC
+ * completions when the Clean Address bit is cleared in the FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+LPFC_ATTR(delay_discovery, 0, 0, 1,
+	"Delay NPort discovery when Clean Address bit is cleared.");
+
+/*
+ * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
+ * This value can be set to values between 64 and 4096. The default value is
+ * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
+ * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
+ */
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+
+/*
+ * lpfc_enable_mds_diags: Enable MDS Diagnostics
+ *       0  = MDS Diagnostics disabled (default)
+ *       1  = MDS Diagnostics enabled
+ * Value range is [0,1]. Default value is 0.
+ */
+LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+
+/*
+ * lpfc_enable_bbcr: Enable BB Credit Recovery
+ *       0  = BB Credit Recovery disabled
+ *       1  = BB Credit Recovery enabled (default)
+ * Value range is [0,1]. Default value is 1.
+ */
+LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
+
+struct device_attribute *lpfc_hba_attrs[] = {
+	&dev_attr_nvme_info,
+	&dev_attr_bg_info,
+	&dev_attr_bg_guard_err,
+	&dev_attr_bg_apptag_err,
+	&dev_attr_bg_reftag_err,
+	&dev_attr_info,
+	&dev_attr_serialnum,
+	&dev_attr_modeldesc,
+	&dev_attr_modelname,
+	&dev_attr_programtype,
+	&dev_attr_portnum,
+	&dev_attr_fwrev,
+	&dev_attr_hdw,
+	&dev_attr_option_rom_version,
+	&dev_attr_link_state,
+	&dev_attr_num_discovered_ports,
+	&dev_attr_menlo_mgmt_mode,
+	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_enable_fip,
+	&dev_attr_lpfc_temp_sensor,
+	&dev_attr_lpfc_log_verbose,
+	&dev_attr_lpfc_lun_queue_depth,
+	&dev_attr_lpfc_tgt_queue_depth,
+	&dev_attr_lpfc_hba_queue_depth,
+	&dev_attr_lpfc_peer_port_login,
+	&dev_attr_lpfc_nodev_tmo,
+	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_enable_fc4_type,
+	&dev_attr_lpfc_xri_split,
+	&dev_attr_lpfc_fcp_class,
+	&dev_attr_lpfc_use_adisc,
+	&dev_attr_lpfc_first_burst_size,
+	&dev_attr_lpfc_ack0,
+	&dev_attr_lpfc_topology,
+	&dev_attr_lpfc_scan_down,
+	&dev_attr_lpfc_link_speed,
+	&dev_attr_lpfc_fcp_io_sched,
+	&dev_attr_lpfc_fcp2_no_tgt_reset,
+	&dev_attr_lpfc_cr_delay,
+	&dev_attr_lpfc_cr_count,
+	&dev_attr_lpfc_multi_ring_support,
+	&dev_attr_lpfc_multi_ring_rctl,
+	&dev_attr_lpfc_multi_ring_type,
+	&dev_attr_lpfc_fdmi_on,
+	&dev_attr_lpfc_enable_SmartSAN,
+	&dev_attr_lpfc_max_luns,
+	&dev_attr_lpfc_enable_npiv,
+	&dev_attr_lpfc_fcf_failover_policy,
+	&dev_attr_lpfc_enable_rrq,
+	&dev_attr_nport_evt_cnt,
+	&dev_attr_board_mode,
+	&dev_attr_max_vpi,
+	&dev_attr_used_vpi,
+	&dev_attr_max_rpi,
+	&dev_attr_used_rpi,
+	&dev_attr_max_xri,
+	&dev_attr_used_xri,
+	&dev_attr_npiv_info,
+	&dev_attr_issue_reset,
+	&dev_attr_lpfc_poll,
+	&dev_attr_lpfc_poll_tmo,
+	&dev_attr_lpfc_task_mgmt_tmo,
+	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_nvme_oas,
+	&dev_attr_lpfc_auto_imax,
+	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_cpu_map,
+	&dev_attr_lpfc_fcp_io_channel,
+	&dev_attr_lpfc_suppress_rsp,
+	&dev_attr_lpfc_nvme_io_channel,
+	&dev_attr_lpfc_nvmet_mrq,
+	&dev_attr_lpfc_nvme_enable_fb,
+	&dev_attr_lpfc_nvmet_fb_size,
+	&dev_attr_lpfc_enable_bg,
+	&dev_attr_lpfc_soft_wwnn,
+	&dev_attr_lpfc_soft_wwpn,
+	&dev_attr_lpfc_soft_wwn_enable,
+	&dev_attr_lpfc_enable_hba_reset,
+	&dev_attr_lpfc_enable_hba_heartbeat,
+	&dev_attr_lpfc_EnableXLane,
+	&dev_attr_lpfc_XLanePriority,
+	&dev_attr_lpfc_xlane_lun,
+	&dev_attr_lpfc_xlane_tgt,
+	&dev_attr_lpfc_xlane_vpt,
+	&dev_attr_lpfc_xlane_lun_state,
+	&dev_attr_lpfc_xlane_lun_status,
+	&dev_attr_lpfc_xlane_priority,
+	&dev_attr_lpfc_sg_seg_cnt,
+	&dev_attr_lpfc_max_scsicmpl_time,
+	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_aer_support,
+	&dev_attr_lpfc_aer_state_cleanup,
+	&dev_attr_lpfc_sriov_nr_virtfn,
+	&dev_attr_lpfc_req_fw_upgrade,
+	&dev_attr_lpfc_suppress_link_up,
+	&dev_attr_lpfc_iocb_cnt,
+	&dev_attr_iocb_hw,
+	&dev_attr_txq_hw,
+	&dev_attr_txcmplq_hw,
+	&dev_attr_lpfc_fips_level,
+	&dev_attr_lpfc_fips_rev,
+	&dev_attr_lpfc_dss,
+	&dev_attr_lpfc_sriov_hw_max_virtfn,
+	&dev_attr_protocol,
+	&dev_attr_lpfc_xlane_supported,
+	&dev_attr_lpfc_enable_mds_diags,
+	&dev_attr_lpfc_enable_bbcr,
+	NULL,
+};
+
+struct device_attribute *lpfc_vport_attrs[] = {
+	&dev_attr_info,
+	&dev_attr_link_state,
+	&dev_attr_num_discovered_ports,
+	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_log_verbose,
+	&dev_attr_lpfc_lun_queue_depth,
+	&dev_attr_lpfc_tgt_queue_depth,
+	&dev_attr_lpfc_nodev_tmo,
+	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_hba_queue_depth,
+	&dev_attr_lpfc_peer_port_login,
+	&dev_attr_lpfc_restrict_login,
+	&dev_attr_lpfc_fcp_class,
+	&dev_attr_lpfc_use_adisc,
+	&dev_attr_lpfc_first_burst_size,
+	&dev_attr_lpfc_max_luns,
+	&dev_attr_nport_evt_cnt,
+	&dev_attr_npiv_info,
+	&dev_attr_lpfc_enable_da_id,
+	&dev_attr_lpfc_max_scsicmpl_time,
+	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_static_vport,
+	&dev_attr_lpfc_fips_level,
+	&dev_attr_lpfc_fips_rev,
+	NULL,
+};
+
+/**
+ * sysfs_ctlreg_write - Write method for writing to ctlreg
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * -EPERM adapter is offline
+ * value of count, buf contents written
+ **/
+static ssize_t
+sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *bin_attr,
+		   char *buf, loff_t off, size_t count)
+{
+	size_t buf_off;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		return -EPERM;
+
+	if ((off + count) > FF_REG_AREA_SIZE)
+		return -ERANGE;
+
+	if (count <= LPFC_REG_WRITE_KEY_SIZE)
+		return 0;
+
+	if (off % 4 || count % 4 || (unsigned long)buf % 4)
+		return -EINVAL;
+
+	/* This is to protect HBA registers from accidental writes. */
+	if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
+		return -EINVAL;
+
+	if (!(vport->fc_flag & FC_OFFLINE_MODE))
+		return -EPERM;
+
+	spin_lock_irq(&phba->hbalock);
+	for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
+			buf_off += sizeof(uint32_t))
+		writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
+		       phba->ctrl_regs_memmap_p + off + buf_off);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	return count;
+}
+
+/**
+ * sysfs_ctlreg_read - Read method for reading from ctlreg
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: if successful contains the data from the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to read data into buf.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * value of count, buf contents read
+ **/
+static ssize_t
+sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
+		  struct bin_attribute *bin_attr,
+		  char *buf, loff_t off, size_t count)
+{
+	size_t buf_off;
+	uint32_t *tmp_ptr;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		return -EPERM;
+
+	if (off > FF_REG_AREA_SIZE)
+		return -ERANGE;
+
+	if ((off + count) > FF_REG_AREA_SIZE)
+		count = FF_REG_AREA_SIZE - off;
+
+	if (count == 0)
+		return 0;
+
+	if (off % 4 || count % 4 || (unsigned long)buf % 4)
+		return -EINVAL;
+
+	spin_lock_irq(&phba->hbalock);
+
+	for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
+		tmp_ptr = (uint32_t *)(buf + buf_off);
+		*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_ctlreg_attr = {
+	.attr = {
+		.name = "ctlreg",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 256,
+	.read = sysfs_ctlreg_read,
+	.write = sysfs_ctlreg_write,
+};
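+
+/*
+ * Illustrative write layout (not part of the driver): a ctlreg write
+ * must be 4-byte aligned and lead with LPFC_REG_WRITE_KEY; only the
+ * bytes after the key are written to the register area, starting at
+ * the file offset. A minimal user-space sketch, assuming the key is a
+ * plain byte string:
+ *
+ *   char wbuf[LPFC_REG_WRITE_KEY_SIZE + sizeof(uint32_t)];
+ *   uint32_t reg_val = 0x1;
+ *   memcpy(wbuf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE);
+ *   memcpy(wbuf + LPFC_REG_WRITE_KEY_SIZE, &reg_val, sizeof(reg_val));
+ *   pwrite(fd, wbuf, sizeof(wbuf), offset);  (fd: the open ctlreg file)
+ */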
+
+/**
+ * sysfs_mbox_write - Write method for writing information via mbox
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
+ *
+ * Returns:
+ * -EPERM operation not permitted
+ **/
+static ssize_t
+sysfs_mbox_write(struct file *filp, struct kobject *kobj,
+		 struct bin_attribute *bin_attr,
+		 char *buf, loff_t off, size_t count)
+{
+	return -EPERM;
+}
+
+/**
+ * sysfs_mbox_read - Read method for reading information via mbox
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be read from sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
+ *
+ * Returns:
+ * -EPERM operation not permitted
+ **/
+static ssize_t
+sysfs_mbox_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
+		char *buf, loff_t off, size_t count)
+{
+	return -EPERM;
+}
+
+static struct bin_attribute sysfs_mbox_attr = {
+	.attr = {
+		.name = "mbox",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = MAILBOX_SYSFS_MAX,
+	.read = sysfs_mbox_read,
+	.write = sysfs_mbox_write,
+};
+
+/**
+ * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
+ * @vport: address of lpfc vport structure.
+ *
+ * Return codes:
+ * zero on success
+ * error return code from sysfs_create_bin_file()
+ **/
+int
+lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	int error;
+
+	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+				      &sysfs_drvr_stat_data_attr);
+
+	/* Virtual ports do not need ctrl_reg and mbox */
+	if (error || vport->port_type == LPFC_NPIV_PORT)
+		goto out;
+
+	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+				      &sysfs_ctlreg_attr);
+	if (error)
+		goto out_remove_stat_attr;
+
+	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+				      &sysfs_mbox_attr);
+	if (error)
+		goto out_remove_ctlreg_attr;
+
+	return 0;
+out_remove_ctlreg_attr:
+	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+out_remove_stat_attr:
+	sysfs_remove_bin_file(&shost->shost_dev.kobj,
+			&sysfs_drvr_stat_data_attr);
+out:
+	return error;
+}
+
+/**
+ * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
+ * @vport: address of lpfc vport structure.
+ **/
+void
+lpfc_free_sysfs_attr(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	sysfs_remove_bin_file(&shost->shost_dev.kobj,
+		&sysfs_drvr_stat_data_attr);
+	/* Virtual ports do not need ctrl_reg and mbox */
+	if (vport->port_type == LPFC_NPIV_PORT)
+		return;
+	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
+	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+}
+
+/*
+ * Dynamic FC Host Attributes Support
+ */
+
+/**
+ * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+
+	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+				      sizeof fc_host_symbolic_name(shost));
+}
+
+/**
+ * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+	/* note: fc_myDID already in cpu endianness */
+	fc_host_port_id(shost) = vport->fc_myDID;
+}
+
+/**
+ * lpfc_get_host_port_type - Set the value of the scsi host port type
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_type(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (vport->port_type == LPFC_NPIV_PORT) {
+		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+	} else if (lpfc_is_link_up(phba)) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			if (vport->fc_flag & FC_PUBLIC_LOOP)
+				fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+			else
+				fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+		} else {
+			if (vport->fc_flag & FC_FABRIC)
+				fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+			else
+				fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		}
+	} else
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_port_state - Set the value of the scsi host port state
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+	else {
+		switch (phba->link_state) {
+		case LPFC_LINK_UNKNOWN:
+		case LPFC_LINK_DOWN:
+			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+			break;
+		case LPFC_LINK_UP:
+		case LPFC_CLEAR_LA:
+		case LPFC_HBA_READY:
+			/* Links up, reports port state accordingly */
+			if (vport->port_state < LPFC_VPORT_READY)
+				fc_host_port_state(shost) =
+							FC_PORTSTATE_BYPASSED;
+			else
+				fc_host_port_state(shost) =
+							FC_PORTSTATE_ONLINE;
+			break;
+		case LPFC_HBA_ERROR:
+			fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+			break;
+		default:
+			fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+			break;
+		}
+	}
+
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_speed - Set the value of the scsi host speed
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_speed(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	spin_lock_irq(shost->host_lock);
+
+	if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
+		switch(phba->fc_linkspeed) {
+		case LPFC_LINK_SPEED_1GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+			break;
+		case LPFC_LINK_SPEED_2GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+			break;
+		case LPFC_LINK_SPEED_4GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+			break;
+		case LPFC_LINK_SPEED_8GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+			break;
+		case LPFC_LINK_SPEED_10GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+			break;
+		case LPFC_LINK_SPEED_16GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+			break;
+		case LPFC_LINK_SPEED_32GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
+			break;
+		default:
+			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+			break;
+		}
+	} else
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_fabric_name (struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	u64 node_name;
+
+	spin_lock_irq(shost->host_lock);
+
+	if ((vport->port_state > LPFC_FLOGI) &&
+	    ((vport->fc_flag & FC_FABRIC) ||
+	     ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+	      (vport->fc_flag & FC_PUBLIC_LOOP))))
+		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
+	else
+		/* fabric is local port if there is no F/FL_Port */
+		node_name = 0;
+
+	spin_unlock_irq(shost->host_lock);
+
+	fc_host_fabric_name(shost) = node_name;
+}
+
+/**
+ * lpfc_get_stats - Return statistical information about the adapter
+ * @shost: kernel scsi host pointer.
+ *
+ * Notes:
+ * Returns NULL on error: link down, no mailbox pool, SLI not active,
+ * management I/O blocked, memory allocation failure, or mailbox error.
+ *
+ * Returns:
+ * NULL for error
+ * address of the adapter host statistics
+ **/
+static struct fc_host_statistics *
+lpfc_get_stats(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_sli   *psli = &phba->sli;
+	struct fc_host_statistics *hs = &phba->link_stats;
+	struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	unsigned long seconds;
+	int rc = 0;
+
+	/*
+	 * prevent udev from issuing mailbox commands until the port is
+	 * configured.
+	 */
+	if (phba->link_state < LPFC_LINK_DOWN ||
+	    !phba->mbox_mem_pool ||
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
+		return NULL;
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+		return NULL;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return NULL;
+	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	pmb->mbxCommand = MBX_READ_STATUS;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return NULL;
+	}
+
+	memset(hs, 0, sizeof (struct fc_host_statistics));
+
+	hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+	/*
+	 * MBX_READ_STATUS returns tx_k_bytes (kilobytes), which has to
+	 * be converted to 4-byte words (1024 / 4 = 256)
+	 */
+	hs->tx_words = (uint64_t)
+			((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+			* (uint64_t)256);
+	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+	hs->rx_words = (uint64_t)
+			((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+			 * (uint64_t)256);
+
+	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+	pmb->mbxCommand = MBX_READ_LNK_STAT;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return NULL;
+	}
+
+	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+	hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+	hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+	hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+	hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+	hs->error_frames = pmb->un.varRdLnk.crcCnt;
+
+	hs->link_failure_count -= lso->link_failure_count;
+	hs->loss_of_sync_count -= lso->loss_of_sync_count;
+	hs->loss_of_signal_count -= lso->loss_of_signal_count;
+	hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+	hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+	hs->invalid_crc_count -= lso->invalid_crc_count;
+	hs->error_frames -= lso->error_frames;
+
+	if (phba->hba_flag & HBA_FCOE_MODE) {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->link_events >> 1);
+		hs->nos_count -= lso->link_events;
+	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		hs->lip_count = (phba->fc_eventTag >> 1);
+		hs->lip_count -= lso->link_events;
+		hs->nos_count = -1;
+	} else {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->fc_eventTag >> 1);
+		hs->nos_count -= lso->link_events;
+	}
+
+	hs->dumped_frames = -1;
+
+	seconds = get_seconds();
+	if (seconds < psli->stats_start)
+		hs->seconds_since_last_reset = seconds +
+				((unsigned long)-1 - psli->stats_start);
+	else
+		hs->seconds_since_last_reset = seconds - psli->stats_start;
+
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return hs;
+}
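+
+/*
+ * Illustrative sketch, not part of the driver: the counters returned by
+ * lpfc_get_stats() surface through the FC transport class as sysfs files
+ * such as /sys/class/fc_host/hostN/statistics/tx_frames (writing 1 to
+ * statistics/reset_statistics invokes lpfc_reset_stats() below). A
+ * minimal user-space reader; "host0" is a placeholder:
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[32];
+ *		FILE *f = fopen(
+ *			"/sys/class/fc_host/host0/statistics/tx_frames", "r");
+ *
+ *		if (f && fgets(buf, sizeof(buf), f))
+ *			printf("tx_frames: %s", buf);
+ *		if (f)
+ *			fclose(f);
+ *		return 0;
+ *	}
+ */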
+
+/**
+ * lpfc_reset_stats - Reset the adapter link stats (snapshot counters as offsets)
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_sli   *psli = &phba->sli;
+	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	int rc = 0;
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+		return;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return;
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	pmb->mbxCommand = MBX_READ_STATUS;
+	pmb->mbxOwner = OWN_HOST;
+	pmb->un.varWords[0] = 0x1; /* reset request */
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmb->mbxCommand = MBX_READ_LNK_STAT;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+	lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+	lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+	lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+	lso->error_frames = pmb->un.varRdLnk.crcCnt;
+	if (phba->hba_flag & HBA_FCOE_MODE)
+		lso->link_events = (phba->link_events >> 1);
+	else
+		lso->link_events = (phba->fc_eventTag >> 1);
+
+	psli->stats_start = get_seconds();
+
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return;
+}
+
+/*
+ * The LPFC driver treats linkdown handling as target loss events so there
+ * are no sysfs handlers for link_down_tmo.
+ */
+
+/**
+ * lpfc_get_node_by_target - Return the nodelist for a target
+ * @starget: kernel scsi target pointer.
+ *
+ * Returns:
+ * address of the node list if found
+ * NULL target not found
+ **/
+static struct lpfc_nodelist *
+lpfc_get_node_by_target(struct scsi_target *starget)
+{
+	struct Scsi_Host  *shost = dev_to_shost(starget->dev.parent);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	/* Search for the mapped target matching this ID */
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (NLP_CHK_NODE_ACT(ndlp) &&
+		    ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+		    starget->id == ndlp->nlp_sid) {
+			spin_unlock_irq(shost->host_lock);
+			return ndlp;
+		}
+	}
+	spin_unlock_irq(shost->host_lock);
+	return NULL;
+}
+
+/**
+ * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
+ * @starget: kernel scsi target pointer.
+ **/
+static void
+lpfc_get_starget_port_id(struct scsi_target *starget)
+{
+	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+	fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
+}
+
+/**
+ * lpfc_get_starget_node_name - Set the target node name
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: Set the target node name to the ndlp node name wwn or zero.
+ **/
+static void
+lpfc_get_starget_node_name(struct scsi_target *starget)
+{
+	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+	fc_starget_node_name(starget) =
+		ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
+}
+
+/**
+ * lpfc_get_starget_port_name - Set the target port name
+ * @starget: kernel scsi target pointer.
+ *
+ * Description:  set the target port name to the ndlp port name wwn or zero.
+ **/
+static void
+lpfc_get_starget_port_name(struct scsi_target *starget)
+{
+	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+	fc_starget_port_name(starget) =
+		ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
+}
+
+/**
+ * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
+ * @rport: fc rport address.
+ * @timeout: new value for dev loss tmo.
+ *
+ * Description:
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ **/
+static void
+lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+	if (timeout)
+		rport->dev_loss_tmo = timeout;
+	else
+		rport->dev_loss_tmo = 1;
+}
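+
+/*
+ * The clamp above could equivalently be written with the kernel's max_t()
+ * helper (a sketch only; the driver keeps the explicit if/else):
+ *
+ *	rport->dev_loss_tmo = max_t(u32, timeout, 1);
+ */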
+
+/**
+ * lpfc_rport_show_function - Return rport target information
+ *
+ * Description:
+ * Macro that uses @field to generate a show function named
+ * lpfc_show_rport_<field>.
+ *
+ * lpfc_show_rport_##field: returns the field formatted into @buf
+ * @dev: device converted to an fc_rport.
+ * @buf: on return contains the target field or zero.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_rport_show_function(field, format_string, sz, cast)	\
+static ssize_t								\
+lpfc_show_rport_##field (struct device *dev,				\
+			 struct device_attribute *attr,			\
+			 char *buf)					\
+{									\
+	struct fc_rport *rport = transport_class_to_rport(dev);		\
+	struct lpfc_rport_data *rdata = rport->hostdata;		\
+	return snprintf(buf, sz, format_string,				\
+		(rdata->target) ? cast rdata->target->field : 0);	\
+}
+
+#define lpfc_rport_rd_attr(field, format_string, sz)			\
+	lpfc_rport_show_function(field, format_string, sz, )		\
+static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
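+
+/*
+ * Usage sketch with a hypothetical field name: a line such as
+ *
+ *	lpfc_rport_rd_attr(example_count, "%d\n", 12);
+ *
+ * would expand to a lpfc_show_rport_example_count() show routine plus a
+ * read-only FC_RPORT_ATTR. The name example_count is illustrative only
+ * and does not exist in the target structure.
+ */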
+
+/**
+ * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
+ * @fc_vport: The fc_vport whose symbolic name has been changed.
+ *
+ * Description:
+ * This function is called by the transport after the @fc_vport's symbolic name
+ * has been changed. This function re-registers the symbolic name with the
+ * switch to propagate the change into the fabric if the vport is active.
+ **/
+static void
+lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+
+	if (vport->port_state == LPFC_VPORT_READY)
+		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+}
+
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to copy the
+ * module parameter lpfc_log_verbose into @phba's cfg_log_verbose so that
+ * log messages can be filtered according to that setting before any hba
+ * port or vport is created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+	phba->cfg_log_verbose = verbose;
+}
+
+struct fc_function_template lpfc_transport_functions = {
+	/* fixed attributes the driver supports */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+
+	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
+	.show_host_symbolic_name = 1,
+
+	/* dynamic attributes the driver supports */
+	.get_host_port_id = lpfc_get_host_port_id,
+	.show_host_port_id = 1,
+
+	.get_host_port_type = lpfc_get_host_port_type,
+	.show_host_port_type = 1,
+
+	.get_host_port_state = lpfc_get_host_port_state,
+	.show_host_port_state = 1,
+
+	/* active_fc4s is shown but doesn't change (thus no get function) */
+	.show_host_active_fc4s = 1,
+
+	.get_host_speed = lpfc_get_host_speed,
+	.show_host_speed = 1,
+
+	.get_host_fabric_name = lpfc_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+
+	/*
+	 * The LPFC driver treats linkdown handling as target loss events
+	 * so there are no sysfs handlers for link_down_tmo.
+	 */
+
+	.get_fc_host_stats = lpfc_get_stats,
+	.reset_fc_host_stats = lpfc_reset_stats,
+
+	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.get_starget_port_id  = lpfc_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.get_starget_node_name = lpfc_get_starget_node_name,
+	.show_starget_node_name = 1,
+
+	.get_starget_port_name = lpfc_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	.issue_fc_host_lip = lpfc_issue_lip,
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
+
+	.dd_fcvport_size = sizeof(struct lpfc_vport *),
+
+	.vport_disable = lpfc_vport_disable,
+
+	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+
+	.bsg_request = lpfc_bsg_request,
+	.bsg_timeout = lpfc_bsg_timeout,
+};
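+
+/*
+ * Sketch of how a template like this is consumed: during module init the
+ * driver attaches it to the FC transport class, roughly as follows (the
+ * local variable name is illustrative):
+ *
+ *	struct scsi_transport_template *tt;
+ *
+ *	tt = fc_attach_transport(&lpfc_transport_functions);
+ *	if (!tt)
+ *		return -ENOMEM;
+ *	// tt is then plugged into the scsi_host_template as ->transportt
+ */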
+
+struct fc_function_template lpfc_vport_transport_functions = {
+	/* fixed attributes the driver supports */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+
+	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
+	.show_host_symbolic_name = 1,
+
+	/* dynamic attributes the driver supports */
+	.get_host_port_id = lpfc_get_host_port_id,
+	.show_host_port_id = 1,
+
+	.get_host_port_type = lpfc_get_host_port_type,
+	.show_host_port_type = 1,
+
+	.get_host_port_state = lpfc_get_host_port_state,
+	.show_host_port_state = 1,
+
+	/* active_fc4s is shown but doesn't change (thus no get function) */
+	.show_host_active_fc4s = 1,
+
+	.get_host_speed = lpfc_get_host_speed,
+	.show_host_speed = 1,
+
+	.get_host_fabric_name = lpfc_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+
+	/*
+	 * The LPFC driver treats linkdown handling as target loss events
+	 * so there are no sysfs handlers for link_down_tmo.
+	 */
+
+	.get_fc_host_stats = lpfc_get_stats,
+	.reset_fc_host_stats = lpfc_reset_stats,
+
+	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.get_starget_port_id  = lpfc_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.get_starget_node_name = lpfc_get_starget_node_name,
+	.show_starget_node_name = 1,
+
+	.get_starget_port_name = lpfc_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
+
+	.vport_disable = lpfc_vport_disable,
+
+	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+};
+
+/**
+ * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
+ * @phba: lpfc_hba pointer.
+ **/
+void
+lpfc_get_cfgparam(struct lpfc_hba *phba)
+{
+	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+	lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
+	lpfc_cr_delay_init(phba, lpfc_cr_delay);
+	lpfc_cr_count_init(phba, lpfc_cr_count);
+	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
+	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
+	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
+	lpfc_ack0_init(phba, lpfc_ack0);
+	lpfc_topology_init(phba, lpfc_topology);
+	lpfc_link_speed_init(phba, lpfc_link_speed);
+	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
+	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
+	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
+	lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
+	lpfc_use_msi_init(phba, lpfc_use_msi);
+	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
+	lpfc_auto_imax_init(phba, lpfc_auto_imax);
+	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
+	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
+	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+
+	lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		phba->cfg_EnableXLane = 0;
+	lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+
+	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
+	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
+	phba->cfg_oas_lun_state = 0;
+	phba->cfg_oas_lun_status = 0;
+	phba->cfg_oas_flags = 0;
+	phba->cfg_oas_priority = 0;
+	lpfc_enable_bg_init(phba, lpfc_enable_bg);
+	lpfc_prot_mask_init(phba, lpfc_prot_mask);
+	lpfc_prot_guard_init(phba, lpfc_prot_guard);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->cfg_poll = 0;
+	else
+		phba->cfg_poll = lpfc_poll;
+	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
+
+	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
+	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
+
+	/* Initialize first burst. Target vs Initiator are different. */
+	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
+	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
+	lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
+
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		/* NVME only supported on SLI4 */
+		phba->nvmet_support = 0;
+		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+		phba->cfg_enable_bbcr = 0;
+	} else {
+		/* We MUST have FCP support */
+		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+			phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
+	}
+
+	if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
+		phba->cfg_auto_imax = 0;
+	phba->initial_imax = phba->cfg_fcp_imax;
+
+	/* A value of 0 means use the number of CPUs found in the system */
+	if (phba->cfg_fcp_io_channel == 0)
+		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+	if (phba->cfg_nvme_io_channel == 0)
+		phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
+
+	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+		phba->cfg_fcp_io_channel = 0;
+
+	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
+		phba->cfg_nvme_io_channel = 0;
+
+	if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
+		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+	else
+		phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+
+	phba->cfg_soft_wwnn = 0L;
+	phba->cfg_soft_wwpn = 0L;
+	lpfc_xri_split_init(phba, lpfc_xri_split);
+	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
+	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+	lpfc_aer_support_init(phba, lpfc_aer_support);
+	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
+	lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
+	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
+	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+	lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
+	lpfc_sli_mode_init(phba, lpfc_sli_mode);
+	phba->cfg_enable_dss = 1;
+	lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+	return;
+}
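+
+/*
+ * Each *_init helper called above is generated by the lpfc_param_init()
+ * macro. A simplified sketch of the generated shape, using a hypothetical
+ * parameter name (the real macro also logs a message on a bad value):
+ *
+ *	static int lpfc_example_tmo_init(struct lpfc_hba *phba, uint val)
+ *	{
+ *		if (val >= minval && val <= maxval) {
+ *			phba->cfg_example_tmo = val;
+ *			return 0;
+ *		}
+ *		phba->cfg_example_tmo = defval;	// fall back to the default
+ *		return -EINVAL;
+ *	}
+ */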
+
+/**
+ * lpfc_nvme_mod_param_dep - Adjust module parameters for protocol/role dependencies
+ * @phba: lpfc_hba pointer.
+ **/
+void
+lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
+{
+	if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
+		phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
+
+	if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
+		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
+	    phba->nvmet_support) {
+		phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
+		phba->cfg_fcp_io_channel = 0;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+				"6013 %s x%x fb_size x%x, fb_max x%x\n",
+				"NVME Target PRLI ACC enable_fb ",
+				phba->cfg_nvme_enable_fb,
+				phba->cfg_nvmet_fb_size,
+				LPFC_NVMET_FB_SZ_MAX);
+
+		if (phba->cfg_nvme_enable_fb == 0)
+			phba->cfg_nvmet_fb_size = 0;
+		else {
+			if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
+				phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
+		}
+
+		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
+		if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
+			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+					"6018 Adjust lpfc_nvmet_mrq to %d\n",
+					phba->cfg_nvmet_mrq);
+		}
+	} else {
+		/* Not NVME Target mode.  Turn off Target parameters. */
+		phba->nvmet_support = 0;
+		phba->cfg_nvmet_mrq = 0;
+		phba->cfg_nvmet_fb_size = 0;
+	}
+
+	if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
+		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+	else
+		phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+}
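+
+/*
+ * The closing if/else above (and the identical one in lpfc_get_cfgparam())
+ * simply takes a maximum; an equivalent one-line sketch:
+ *
+ *	phba->io_channel_irqs = max(phba->cfg_fcp_io_channel,
+ *				    phba->cfg_nvme_io_channel);
+ */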
+
+/**
+ * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
+ * @vport: lpfc_vport pointer.
+ **/
+void
+lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
+{
+	lpfc_log_verbose_init(vport, lpfc_log_verbose);
+	lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
+	lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
+	lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
+	lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
+	lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
+	lpfc_restrict_login_init(vport, lpfc_restrict_login);
+	lpfc_fcp_class_init(vport, lpfc_fcp_class);
+	lpfc_use_adisc_init(vport, lpfc_use_adisc);
+	lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
+	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
+	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
+	lpfc_max_luns_init(vport, lpfc_max_luns);
+	lpfc_scan_down_init(vport, lpfc_scan_down);
+	lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
+	return;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_attr.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_attr.h
new file mode 100644
index 0000000..931db52
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_attr.h
@@ -0,0 +1,128 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_init(name, defval, minval, maxval)
+
+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_BBCR_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0444);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, 0644,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_init(name, defval, minval, maxval)
+
+#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \
+static uint64_t lpfc_##name = defval;\
+module_param(lpfc_##name, ullong, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
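+
+/*
+ * Usage sketch for the macros above, with a hypothetical parameter name:
+ *
+ *	LPFC_ATTR_RW(example_tmo, 30, 1, 255,
+ *		     "Example timeout in seconds");
+ *
+ * A line like this in lpfc_attr.c would create the module parameter
+ * lpfc_example_tmo (visible under /sys/module/lpfc/parameters/) plus a
+ * read-write per-host sysfs attribute backed by the generated
+ * _show/_init/_set/_store helpers. The name example_tmo is illustrative
+ * only.
+ */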
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_bsg.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 0000000..08ed27b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,5527 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/mempool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/bsg-lib.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_bsg.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+struct lpfc_bsg_event {
+	struct list_head node;
+	struct kref kref;
+	wait_queue_head_t wq;
+
+	/* Event type and waiter identifiers */
+	uint32_t type_mask;
+	uint32_t req_id;
+	uint32_t reg_id;
+
+	/* next two flags are here for the auto-delete logic */
+	unsigned long wait_time_stamp;
+	int waiting;
+
+	/* seen and not seen events */
+	struct list_head events_to_get;
+	struct list_head events_to_see;
+
+	/* driver data associated with the job */
+	void *dd_data;
+};
+
+struct lpfc_bsg_iocb {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_dmabuf *rmp;
+	struct lpfc_nodelist *ndlp;
+};
+
+struct lpfc_bsg_mbox {
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
+	uint8_t *ext; /* extended mailbox data */
+	uint32_t mbOffset; /* from app */
+	uint32_t inExtWLen; /* from app */
+	uint32_t outExtWLen; /* from app */
+};
+
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_dmabuf *rmp;
+};
+
+#define TYPE_EVT	1
+#define TYPE_IOCB	2
+#define TYPE_MBOX	3
+#define TYPE_MENLO	4
+struct bsg_job_data {
+	uint32_t type;
+	struct bsg_job *set_job; /* job waiting for this iocb to finish */
+	union {
+		struct lpfc_bsg_event *evt;
+		struct lpfc_bsg_iocb iocb;
+		struct lpfc_bsg_mbox mbox;
+		struct lpfc_bsg_menlo menlo;
+	} context_un;
+};
+
+struct event_data {
+	struct list_head node;
+	uint32_t type;
+	uint32_t immed_dat;
+	void *data;
+	uint32_t len;
+};
+
+#define BUF_SZ_4K 4096
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+	ELX_LOOPBACK_XRI_SETUP,
+	ELX_LOOPBACK_DATA,
+};
+
+#define ELX_LOOPBACK_HEADER_SZ \
+	offsetof(struct lpfc_sli_ct_request, un)
+
+struct lpfc_dmabufext {
+	struct lpfc_dmabuf dma;
+	uint32_t size;
+	uint32_t flag;
+};
+
+static void
+lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+	struct lpfc_dmabuf *mlast, *next_mlast;
+
+	if (mlist) {
+		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
+					 list) {
+			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+			list_del(&mlast->list);
+			kfree(mlast);
+		}
+		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+		kfree(mlist);
+	}
+	return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
+		       int outbound_buffers, struct ulp_bde64 *bpl,
+		       int *bpl_entries)
+{
+	struct lpfc_dmabuf *mlist = NULL;
+	struct lpfc_dmabuf *mp;
+	unsigned int bytes_left = size;
+
+	/* Verify we can support the size specified */
+	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
+		return NULL;
+
+	/* Determine the number of dma buffers to allocate */
+	*bpl_entries = DIV_ROUND_UP(size, LPFC_BPL_SIZE);
+
+	/* Allocate dma buffer and place in BPL passed */
+	while (bytes_left) {
+		/* Allocate dma buffer  */
+		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!mp) {
+			if (mlist)
+				lpfc_free_bsg_buffers(phba, mlist);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&mp->list);
+		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+
+		if (!mp->virt) {
+			kfree(mp);
+			if (mlist)
+				lpfc_free_bsg_buffers(phba, mlist);
+			return NULL;
+		}
+
+		/* Queue it to a linked list */
+		if (!mlist)
+			mlist = mp;
+		else
+			list_add_tail(&mp->list, &mlist->list);
+
+		/* Add buffer to buffer pointer list */
+		if (outbound_buffers)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+		bpl->tus.f.bdeSize = (uint16_t)
+			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
+			 bytes_left);
+		bytes_left -= bpl->tus.f.bdeSize;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+	}
+	return mlist;
+}
+
+static unsigned int
+lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
+		   struct bsg_buffer *bsg_buffers,
+		   unsigned int bytes_to_transfer, int to_buffers)
+{
+	struct lpfc_dmabuf *mp;
+	unsigned int transfer_bytes, bytes_copied = 0;
+	unsigned int sg_offset, dma_offset;
+	unsigned char *dma_address, *sg_address;
+	LIST_HEAD(temp_list);
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	bool sg_valid;
+
+	list_splice_init(&dma_buffers->list, &temp_list);
+	list_add(&dma_buffers->list, &temp_list);
+	sg_offset = 0;
+	if (to_buffers)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;
+	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+		       sg_flags);
+	local_irq_save(flags);
+	sg_valid = sg_miter_next(&miter);
+	list_for_each_entry(mp, &temp_list, list) {
+		dma_offset = 0;
+		while (bytes_to_transfer && sg_valid &&
+		       (dma_offset < LPFC_BPL_SIZE)) {
+			dma_address = mp->virt + dma_offset;
+			if (sg_offset) {
+				/* Continue previous partial transfer of sg */
+				sg_address = miter.addr + sg_offset;
+				transfer_bytes = miter.length - sg_offset;
+			} else {
+				sg_address = miter.addr;
+				transfer_bytes = miter.length;
+			}
+			if (bytes_to_transfer < transfer_bytes)
+				transfer_bytes = bytes_to_transfer;
+			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
+				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
+			if (to_buffers)
+				memcpy(dma_address, sg_address, transfer_bytes);
+			else
+				memcpy(sg_address, dma_address, transfer_bytes);
+			dma_offset += transfer_bytes;
+			sg_offset += transfer_bytes;
+			bytes_to_transfer -= transfer_bytes;
+			bytes_copied += transfer_bytes;
+			if (sg_offset >= miter.length) {
+				sg_offset = 0;
+				sg_valid = sg_miter_next(&miter);
+			}
+		}
+	}
+	sg_miter_stop(&miter);
+	local_irq_restore(flags);
+	list_del_init(&dma_buffers->list);
+	list_splice(&temp_list, &dma_buffers->list);
+	return bytes_copied;
+}
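+
+/*
+ * The three successive clamps in the loop above amount to taking a
+ * minimum; an equivalent sketch of the per-iteration copy size:
+ *
+ *	transfer_bytes = min3(bytes_to_transfer,
+ *			      miter.length - sg_offset,
+ *			      LPFC_BPL_SIZE - dma_offset);
+ *
+ * i.e. each memcpy() is bounded by the bytes remaining overall, in the
+ * current sg entry, and in the current dma buffer.
+ */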
+
+/**
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_send_mgmt_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the response data into the bsg job's reply
+ * payload (or sets an error status), releases the resources held for
+ * the command, and completes the job if it is still active.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp, *cmp, *rmp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_bsg_iocb *iocb;
+	unsigned long flags;
+	unsigned int rsp_size;
+	int rc = 0;
+
+	dd_data = cmdiocbq->context1;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		bsg_reply = job->reply;
+		/* Prevent timeout handling from trying to abort job */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	iocb = &dd_data->context_un.iocb;
+	ndlp = iocb->ndlp;
+	rmp = iocb->rmp;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
+	rsp = &rspiocbq->iocb;
+
+	/* Copy the completed data or set the error status */
+
+	if (job) {
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
+				rc = -EACCES;
+			}
+		} else {
+			rsp_size = rsp->un.genreq64.bdl.bdeSize;
+			bsg_reply->reply_payload_rcv_len =
+				lpfc_bsg_copy_data(rmp, &job->reply_payload,
+						   rsp_size, 0);
+		}
+	}
+
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_free_bsg_buffers(phba, rmp);
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(dd_data);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct ulp_bde64 *bpl = NULL;
+	uint32_t timeout;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+	IOCB_t *cmd;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
+	uint32_t creg_val;
+	int rc = 0;
+	int iocb_stat;
+
+	/* in case no data is transferred */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2733 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	if (!lpfc_nlp_get(ndlp)) {
+		rc = -ENODEV;
+		goto no_ndlp;
+	}
+
+	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
+		rc = -ENODEV;
+		goto free_ndlp;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_ndlp;
+	}
+
+	cmd = &cmdiocbq->iocb;
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &request_nseg);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
+
+	bpl += request_nseg;
+	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+				     bpl, &reply_nseg);
+	if (!rmp) {
+		rc = -ENOMEM;
+		goto free_cmp;
+	}
+
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = ndlp->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+	cmd->ulpOwner = OWN_CHIP;
+	cmdiocbq->vport = phba->pport;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	timeout = phba->fc_ratov * 2;
+	cmd->ulpTimeout = timeout;
+
+	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = cmp;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->context_un.ndlp = ndlp;
+	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = rmp;
+	job->dd_data = dd_data;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -EIO;
+			goto free_rmp;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+
+	if (iocb_stat == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed yet */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return 0; /* done for now */
+	} else if (iocb_stat == IOCB_BUSY) {
+		rc = -EAGAIN;
+	} else {
+		rc = -EIO;
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
+
+free_rmp:
+	lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+	lpfc_free_bsg_buffers(phba, cmp);
+free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_ndlp:
+	lpfc_nlp_put(ndlp);
+no_ndlp:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
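+
+/*
+ * User-space sketch of reaching this handler through the FC bsg
+ * interface (illustrative only: the bsg node name and payload setup are
+ * placeholders, and all error handling is omitted):
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/bsg.h>
+ *	#include <scsi/sg.h>
+ *	#include <scsi/scsi_bsg_fc.h>
+ *
+ *	struct fc_bsg_request req = { .msgcode = FC_BSG_RPT_CT };
+ *	struct sg_io_v4 io = {
+ *		.guard = 'Q',
+ *		.protocol = BSG_PROTOCOL_SCSI,
+ *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
+ *		.request = (__u64)(unsigned long)&req,
+ *		.request_len = sizeof(req),
+ *		// .dout_xferp/.din_xferp would carry the CT payload
+ *	};
+ *	int fd = open("/dev/bsg/rport-0:0-1", O_RDWR); // placeholder node
+ *	ioctl(fd, SG_IO, &io);
+ */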
+
+/**
+ * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_rport_els function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the ELS response (or LS_RJT data) into the bsg
+ * job's reply payload, releases the resources held for the command,
+ * and completes the job if it is still active.
+ **/
+static void
+lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+	IOCB_t *rsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
+	struct fc_bsg_ctels_reply *els_reply;
+	uint8_t *rjt_data;
+	unsigned long flags;
+	unsigned int rsp_size;
+	int rc = 0;
+
+	dd_data = cmdiocbq->context1;
+	ndlp = dd_data->context_un.iocb.ndlp;
+	cmdiocbq->context1 = ndlp;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		bsg_reply = job->reply;
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	rsp = &rspiocbq->iocb;
+	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
+
+	/* Copy the completed job data or determine the job status if job is
+	 * still active
+	 */
+
+	if (job) {
+		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+			bsg_reply->reply_payload_rcv_len =
+				sg_copy_from_buffer(job->reply_payload.sg_list,
+						    job->reply_payload.sg_cnt,
+						    prsp->virt,
+						    rsp_size);
+		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+			bsg_reply->reply_payload_rcv_len =
+				sizeof(struct fc_bsg_ctels_reply);
+			/* LS_RJT data returned in word 4 */
+			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+			els_reply = &bsg_reply->reply_data.ctels_reply;
+			els_reply->status = FC_CTELS_STATUS_REJECT;
+			els_reply->rjt_data.action = rjt_data[3];
+			els_reply->rjt_data.reason_code = rjt_data[2];
+			els_reply->rjt_data.reason_explanation = rjt_data[1];
+			els_reply->rjt_data.vendor_unique = rjt_data[0];
+		} else {
+			rc = -EIO;
+		}
+	}
+
+	lpfc_nlp_put(ndlp);
+	lpfc_els_free_iocb(phba, cmdiocbq);
+	kfree(dd_data);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_rport_els - send an ELS command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_rport_els(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t elscmd;
+	uint32_t cmdsize;
+	struct lpfc_iocbq *cmdiocbq;
+	uint16_t rpi = 0;
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
+	uint32_t creg_val;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	/* verify the ELS command payload is not greater than the
+	 * maximum ELS transfer size.
+	 */
+
+	if (job->request_payload.payload_len > FCELSSIZE) {
+		rc = -EINVAL;
+		goto no_dd_data;
+	}
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2735 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	elscmd = bsg_request->rqst_data.r_els.els_code;
+	cmdsize = job->request_payload.payload_len;
+
+	if (!lpfc_nlp_get(ndlp)) {
+		rc = -ENODEV;
+		goto free_dd_data;
+	}
+
+	/* Use the dma buffers allocated by lpfc_prep_els_iocb() for the
+	 * command and response so that, if the job times out and the
+	 * request is freed, we do not DMA into memory that is no longer
+	 * allocated to the request.
+	 */
+
+	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
+				      ndlp->nlp_DID, elscmd);
+	if (!cmdiocbq) {
+		rc = -EIO;
+		goto release_ndlp;
+	}
+
+	rpi = ndlp->nlp_rpi;
+
+	/* Transfer the request payload to allocated command dma buffer */
+
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+			  cmdsize);
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+	else
+		cmdiocbq->iocb.ulpContext = rpi;
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context_un.ndlp = ndlp;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = NULL;
+	job->dd_data = dd_data;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -EIO;
+			goto linkdown_err;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return 0; /* done for now */
+	} else if (rc == IOCB_BUSY) {
+		rc = -EAGAIN;
+	} else {
+		rc = -EIO;
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
+
+linkdown_err:
+	cmdiocbq->context1 = ndlp;
+	lpfc_els_free_iocb(phba, cmdiocbq);
+
+release_ndlp:
+	lpfc_nlp_put(ndlp);
+
+free_dd_data:
+	kfree(dd_data);
+
+no_dd_data:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_event_free - frees an allocated event structure
+ * @kref: Pointer to a kref.
+ *
+ * Called from kref_put. Back cast the kref into an event structure address.
+ * Free any events to get, delete associated nodes, free any events to see,
+ * free any data then free the event itself.
+ **/
+static void
+lpfc_bsg_event_free(struct kref *kref)
+{
+	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
+						  kref);
+	struct event_data *ed;
+
+	list_del(&evt->node);
+
+	while (!list_empty(&evt->events_to_get)) {
+		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
+		list_del(&ed->node);
+		kfree(ed->data);
+		kfree(ed);
+	}
+
+	while (!list_empty(&evt->events_to_see)) {
+		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
+		list_del(&ed->node);
+		kfree(ed->data);
+		kfree(ed);
+	}
+
+	kfree(evt->dd_data);
+	kfree(evt);
+}
+
+/**
+ * lpfc_bsg_event_ref - increments the kref for an event
+ * @evt: Pointer to an event structure.
+ **/
+static inline void
+lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
+{
+	kref_get(&evt->kref);
+}
+
+/**
+ * lpfc_bsg_event_unref - Uses kref_put to free an event structure
+ * @evt: Pointer to an event structure.
+ **/
+static inline void
+lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
+{
+	kref_put(&evt->kref, lpfc_bsg_event_free);
+}
+
+/**
+ * lpfc_bsg_event_new - allocate and initialize an event structure
+ * @ev_mask: Mask of events.
+ * @ev_reg_id: Event reg id.
+ * @ev_req_id: Event request id.
+ **/
+static struct lpfc_bsg_event *
+lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
+{
+	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+
+	if (!evt)
+		return NULL;
+
+	INIT_LIST_HEAD(&evt->events_to_get);
+	INIT_LIST_HEAD(&evt->events_to_see);
+	evt->type_mask = ev_mask;
+	evt->req_id = ev_req_id;
+	evt->reg_id = ev_reg_id;
+	evt->wait_time_stamp = jiffies;
+	evt->dd_data = NULL;
+	init_waitqueue_head(&evt->wq);
+	kref_init(&evt->kref);
+	return evt;
+}
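+
+/*
+ * Lifecycle sketch for the event helpers above: kref_init() gives the
+ * creator one reference; anyone who parks the event on a list or hands
+ * it to another context takes another with lpfc_bsg_event_ref() and
+ * drops it with lpfc_bsg_event_unref(), which frees the event via
+ * lpfc_bsg_event_free() when the count hits zero. For example:
+ *
+ *	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id);
+ *	if (!evt)
+ *		return -ENOMEM;
+ *	lpfc_bsg_event_ref(evt);	// reference held by the waiter list
+ *	...
+ *	lpfc_bsg_event_unref(evt);	// may free the event
+ */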
+
+/**
+ * diag_cmd_data_free - Frees an lpfc dma buffer extension
+ * @phba: Pointer to HBA context object.
+ * @mlist: Pointer to an lpfc dma buffer extension.
+ **/
+static int
+diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
+{
+	struct lpfc_dmabufext *mlast;
+	struct pci_dev *pcidev;
+	struct list_head head, *curr, *next;
+
+	if ((!mlist) || (!lpfc_is_link_up(phba) &&
+		(phba->link_flag & LS_LOOPBACK_MODE))) {
+		return 0;
+	}
+
+	pcidev = phba->pcidev;
+	list_add_tail(&head, &mlist->dma.list);
+
+	list_for_each_safe(curr, next, &head) {
+		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
+		if (mlast->dma.virt)
+			dma_free_coherent(&pcidev->dev,
+					  mlast->size,
+					  mlast->dma.virt,
+					  mlast->dma.phys);
+		kfree(mlast);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to the SLI ring on which the command arrived.
+ * @piocbq: Pointer to the unsolicited command iocb.
+ *
+ * This function is called when an unsolicited CT command is received.  It
+ * forwards the event to any processes registered to receive CT events.
+ **/
+int
+lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *piocbq)
+{
+	uint32_t evt_req_id = 0;
+	uint32_t cmd;
+	struct lpfc_dmabuf *dmabuf = NULL;
+	struct lpfc_bsg_event *evt;
+	struct event_data *evt_dat = NULL;
+	struct lpfc_iocbq *iocbq;
+	size_t offset = 0;
+	struct list_head head;
+	struct ulp_bde64 *bde;
+	dma_addr_t dma_addr;
+	int i;
+	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+	struct lpfc_hbq_entry *hbqe;
+	struct lpfc_sli_ct_request *ct_req;
+	struct bsg_job *job = NULL;
+	struct fc_bsg_reply *bsg_reply;
+	struct bsg_job_data *dd_data = NULL;
+	unsigned long flags;
+	int size = 0;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &piocbq->list);
+
+	if (piocbq->iocb.ulpBdeCount == 0 ||
+	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
+		goto error_ct_unsol_exit;
+
+	if (phba->link_state == LPFC_HBA_ERROR ||
+		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
+		goto error_ct_unsol_exit;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+		dmabuf = bdeBuf1;
+	else {
+		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
+				    piocbq->iocb.un.cont64[0].addrLow);
+		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
+	}
+	if (dmabuf == NULL)
+		goto error_ct_unsol_exit;
+	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+	evt_req_id = ct_req->FsType;
+	cmd = ct_req->CommandResponse.bits.CmdRsp;
+	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
+			evt->req_id != evt_req_id)
+			continue;
+
+		lpfc_bsg_event_ref(evt);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
+		if (evt_dat == NULL) {
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2614 Memory allocation failed for "
+					"CT event\n");
+			break;
+		}
+
+		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+			/* take accumulated byte count from the last iocbq */
+			iocbq = list_entry(head.prev, typeof(*iocbq), list);
+			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+		} else {
+			list_for_each_entry(iocbq, &head, list) {
+				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+					evt_dat->len +=
+					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+			}
+		}
+
+		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
+		if (evt_dat->data == NULL) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2615 Memory allocation failed for "
+					"CT event data, size %d\n",
+					evt_dat->len);
+			kfree(evt_dat);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+			goto error_ct_unsol_exit;
+		}
+
+		list_for_each_entry(iocbq, &head, list) {
+			size = 0;
+			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+				bdeBuf1 = iocbq->context2;
+				bdeBuf2 = iocbq->context3;
+			}
+			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+				if (phba->sli3_options &
+				    LPFC_SLI3_HBQ_ENABLED) {
+					if (i == 0) {
+						hbqe = (struct lpfc_hbq_entry *)
+						  &iocbq->iocb.un.ulpWord[0];
+						size = hbqe->bde.tus.f.bdeSize;
+						dmabuf = bdeBuf1;
+					} else if (i == 1) {
+						hbqe = (struct lpfc_hbq_entry *)
+							&iocbq->iocb.unsli3.
+							sli3Words[4];
+						size = hbqe->bde.tus.f.bdeSize;
+						dmabuf = bdeBuf2;
+					}
+					if ((offset + size) > evt_dat->len)
+						size = evt_dat->len - offset;
+				} else {
+					size = iocbq->iocb.un.cont64[i].
+						tus.f.bdeSize;
+					bde = &iocbq->iocb.un.cont64[i];
+					dma_addr = getPaddr(bde->addrHigh,
+							    bde->addrLow);
+					dmabuf = lpfc_sli_ringpostbuf_get(phba,
+							pring, dma_addr);
+				}
+				if (!dmabuf) {
+					lpfc_printf_log(phba, KERN_ERR,
+						LOG_LIBDFC, "2616 No dmabuf "
+						"found for iocbq 0x%p\n",
+						iocbq);
+					kfree(evt_dat->data);
+					kfree(evt_dat);
+					spin_lock_irqsave(&phba->ct_ev_lock,
+						flags);
+					lpfc_bsg_event_unref(evt);
+					spin_unlock_irqrestore(
+						&phba->ct_ev_lock, flags);
+					goto error_ct_unsol_exit;
+				}
+				memcpy((char *)(evt_dat->data) + offset,
+				       dmabuf->virt, size);
+				offset += size;
+				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
+				    !(phba->sli3_options &
+				      LPFC_SLI3_HBQ_ENABLED)) {
+					lpfc_sli_ringpostbuf_put(phba, pring,
+								 dmabuf);
+				} else {
+					switch (cmd) {
+					case ELX_LOOPBACK_DATA:
+						if (phba->sli_rev <
+						    LPFC_SLI_REV4)
+							diag_cmd_data_free(phba,
+							(struct lpfc_dmabufext
+							 *)dmabuf);
+						break;
+					case ELX_LOOPBACK_XRI_SETUP:
+						if ((phba->sli_rev ==
+							LPFC_SLI_REV2) ||
+							(phba->sli3_options &
+							LPFC_SLI3_HBQ_ENABLED
+							)) {
+							lpfc_in_buf_free(phba,
+									dmabuf);
+						} else {
+							lpfc_post_buffer(phba,
+									 pring,
+									 1);
+						}
+						break;
+					default:
+						if (!(phba->sli3_options &
+						      LPFC_SLI3_HBQ_ENABLED))
+							lpfc_post_buffer(phba,
+									 pring,
+									 1);
+						break;
+					}
+				}
+			}
+		}
+
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			evt_dat->immed_dat = phba->ctx_idx;
+			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
+			/* Provide warning for over-run of the ct_ctx array */
+			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
+			    UNSOL_VALID)
+				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+						"2717 CT context array entry "
+						"[%d] over-run: oxid:x%x, "
+						"sid:x%x\n", phba->ctx_idx,
+						phba->ct_ctx[
+						    evt_dat->immed_dat].oxid,
+						phba->ct_ctx[
+						    evt_dat->immed_dat].SID);
+			phba->ct_ctx[evt_dat->immed_dat].rxid =
+				piocbq->iocb.ulpContext;
+			phba->ct_ctx[evt_dat->immed_dat].oxid =
+				piocbq->iocb.unsli3.rcvsli3.ox_id;
+			phba->ct_ctx[evt_dat->immed_dat].SID =
+				piocbq->iocb.un.rcvels.remoteID;
+			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
+		} else
+			evt_dat->immed_dat = piocbq->iocb.ulpContext;
+
+		evt_dat->type = FC_REG_CT_EVENT;
+		list_add(&evt_dat->node, &evt->events_to_see);
+		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
+			wake_up_interruptible(&evt->wq);
+			lpfc_bsg_event_unref(evt);
+			break;
+		}
+
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+
+		dd_data = (struct bsg_job_data *)evt->dd_data;
+		job = dd_data->set_job;
+		dd_data->set_job = NULL;
+		lpfc_bsg_event_unref(evt);
+		if (job) {
+			bsg_reply = job->reply;
+			bsg_reply->reply_payload_rcv_len = size;
+			/* make error code available to userspace */
+			bsg_reply->result = 0;
+			job->dd_data = NULL;
+			/* complete the job back to userspace */
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+			bsg_job_done(job, bsg_reply->result,
+				       bsg_reply->reply_payload_rcv_len);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+error_ct_unsol_exit:
+	if (!list_empty(&head))
+		list_del(&head);
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
+		return 0;
+	return 1;
+}
+
+/**
+ * lpfc_bsg_ct_unsol_abort - handle a CT abort to the management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an abort of a CT command sent toward the management
+ * plane on an SLI4 port.
+ *
+ * If a pending context for a CT command to the management plane is present,
+ * this function clears that context and returns 1 (handled); otherwise it
+ * returns 0, indicating that no such context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header fc_hdr;
+	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+	int ctx_idx, handled = 0;
+	uint16_t oxid, rxid;
+	uint32_t sid;
+
+	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+			continue;
+		if (phba->ct_ctx[ctx_idx].rxid != rxid)
+			continue;
+		if (phba->ct_ctx[ctx_idx].oxid != oxid)
+			continue;
+		if (phba->ct_ctx[ctx_idx].SID != sid)
+			continue;
+		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+		handled = 1;
+	}
+	return handled;
+}
+
+/**
+ * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
+ * @job: SET_EVENT fc_bsg_job
+ **/
+static int
+lpfc_bsg_hba_set_event(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct set_ct_event *event_req;
+	struct lpfc_bsg_event *evt;
+	int rc = 0;
+	struct bsg_job_data *dd_data = NULL;
+	uint32_t ev_mask;
+	unsigned long flags;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2612 Received SET_CT_EVENT below minimum "
+				"size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_req = (struct set_ct_event *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
+				FC_REG_EVENT_MASK);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (evt->reg_id == event_req->ev_reg_id) {
+			lpfc_bsg_event_ref(evt);
+			evt->wait_time_stamp = jiffies;
+			dd_data = (struct bsg_job_data *)evt->dd_data;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
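+	/*
+	 * If the search ran off the end of the list, &evt->node now points
+	 * at the list head itself: no waiter with this reg_id exists yet.
+	 */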
+	if (&evt->node == &phba->ct_ev_waiters) {
+		/* no event waiting struct yet - first call */
+		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+		if (dd_data == NULL) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2734 Failed allocation of dd_data\n");
+			rc = -ENOMEM;
+			goto job_error;
+		}
+		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
+					event_req->ev_req_id);
+		if (!evt) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2617 Failed allocation of event "
+					"waiter\n");
+			rc = -ENOMEM;
+			goto job_error;
+		}
+		dd_data->type = TYPE_EVT;
+		dd_data->set_job = NULL;
+		dd_data->context_un.evt = evt;
+		evt->dd_data = (void *)dd_data;
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_add(&evt->node, &phba->ct_ev_waiters);
+		lpfc_bsg_event_ref(evt);
+		evt->wait_time_stamp = jiffies;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	evt->waiting = 1;
+	dd_data->set_job = job; /* for unsolicited command */
+	job->dd_data = dd_data; /* for fc transport timeout callback*/
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return 0; /* call job done later */
+
+job_error:
+	kfree(dd_data);
+
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
+ * @job: GET_EVENT fc_bsg_job
+ **/
+static int
+lpfc_bsg_hba_get_event(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct get_ct_event *event_req;
+	struct get_ct_event_reply *event_reply;
+	struct lpfc_bsg_event *evt, *evt_next;
+	struct event_data *evt_dat = NULL;
+	unsigned long flags;
+	uint32_t rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2613 Received GET_CT_EVENT request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_req = (struct get_ct_event *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	event_reply = (struct get_ct_event_reply *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
+		if (evt->reg_id == event_req->ev_reg_id) {
+			if (list_empty(&evt->events_to_get))
+				break;
+			lpfc_bsg_event_ref(evt);
+			evt->wait_time_stamp = jiffies;
+			evt_dat = list_entry(evt->events_to_get.prev,
+					     struct event_data, node);
+			list_del(&evt_dat->node);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* The app may continue to ask for event data until it gets
+	 * an error indicating that there isn't any more
+	 */
+	if (evt_dat == NULL) {
+		bsg_reply->reply_payload_rcv_len = 0;
+		rc = -ENOENT;
+		goto job_error;
+	}
+
+	if (evt_dat->len > job->request_payload.payload_len) {
+		evt_dat->len = job->request_payload.payload_len;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2618 Truncated event data at %d "
+				"bytes\n",
+				job->request_payload.payload_len);
+	}
+
+	event_reply->type = evt_dat->type;
+	event_reply->immed_data = evt_dat->immed_dat;
+	if (evt_dat->len > 0)
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->request_payload.sg_list,
+					    job->request_payload.sg_cnt,
+					    evt_dat->data, evt_dat->len);
+	else
+		bsg_reply->reply_payload_rcv_len = 0;
+
+	if (evt_dat) {
+		kfree(evt_dat->data);
+		kfree(evt_dat);
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	job->dd_data = NULL;
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
+
+job_error:
+	job->dd_data = NULL;
+	bsg_reply->result = rc;
+	return rc;
+}
+
+/**
+ * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued by
+ * lpfc_issue_ct_rsp. It is called by the ring event handler function
+ * without any lock held. This function can be called from both worker
+ * thread context and interrupt context, and also from any other thread
+ * which cleans up the SLI layer objects.
+ * This function maps the response iocb status to an error code, frees
+ * the buffers and tracking structures associated with the command, and,
+ * if the bsg job is still active, completes the job back to the FC
+ * transport.
+ **/
+static void
+lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp, *cmp;
+	struct lpfc_nodelist *ndlp;
+	unsigned long flags;
+	int rc = 0;
+
+	dd_data = cmdiocbq->context1;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	ndlp = dd_data->context_un.iocb.ndlp;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
+	rsp = &rspiocbq->iocb;
+
+	/* Copy the completed job data or set the error status */
+
+	if (job) {
+		bsg_reply = job->reply;
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
+				rc = -EACCES;
+			}
+		} else {
+			bsg_reply->reply_payload_rcv_len = 0;
+		}
+	}
+
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(dd_data);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+	return;
+}
+
+/**
+ * lpfc_issue_ct_rsp - issue a ct response
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @tag: tag index value into the port's context exchange array.
+ * @cmp: Pointer to a dma buffer descriptor chain holding the command payload.
+ * @bmp: Pointer to a dma buffer descriptor holding the buffer pointer list.
+ * @num_entry: Number of entries in the bde.
+ **/
+static int
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
+		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
+		  int num_entry)
+{
+	IOCB_t *icmd;
+	struct lpfc_iocbq *ctiocb = NULL;
+	int rc = 0;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
+	uint32_t creg_val;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2736 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	/* Allocate buffer for  command iocb */
+	ctiocb = lpfc_sli_get_iocbq(phba);
+	if (!ctiocb) {
+		rc = -ENOMEM;
+		goto no_ctiocb;
+	}
+
+	icmd = &ctiocb->iocb;
+	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	/* Fill in rest of iocb */
+	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Do not issue unsol response if oxid not marked as valid */
+		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+		icmd->ulpContext = phba->ct_ctx[tag].rxid;
+		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
+		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+		if (!ndlp) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+				 "2721 ndlp null for oxid %x SID %x\n",
+					icmd->ulpContext,
+					phba->ct_ctx[tag].SID);
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+
+		/* Check if the ndlp is active */
+		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+
+		/* get a reference count so the ndlp doesn't go away while
+		 * we respond
+		 */
+		if (!lpfc_nlp_get(ndlp)) {
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+
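+		/* supply the adapter-assigned physical rpi in iocb word 3 */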
+		icmd->un.ulpWord[3] =
+				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+		/* The exchange is done, mark the entry as invalid */
+		phba->ct_ctx[tag].valid = UNSOL_INVALID;
+	} else
+		icmd->ulpContext = (ushort) tag;
+
+	icmd->ulpTimeout = phba->fc_ratov * 2;
+
+	/* Xmit CT response on exchange <xid> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+
+	ctiocb->iocb_cmpl = NULL;
+	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+	ctiocb->vport = phba->pport;
+	ctiocb->context1 = dd_data;
+	ctiocb->context2 = cmp;
+	ctiocb->context3 = bmp;
+	ctiocb->context_un.ndlp = ndlp;
+	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+
+	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
+	dd_data->context_un.iocb.cmdiocbq = ctiocb;
+	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = NULL;
+	job->dd_data = dd_data;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return 0; /* done for now */
+	}
+
+	/* iocb failed so cleanup */
+	job->dd_data = NULL;
+
+issue_ct_rsp_exit:
+	lpfc_sli_release_iocbq(phba, ctiocb);
+no_ctiocb:
+	kfree(dd_data);
+no_dd_data:
+	return rc;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
+ * @job: SEND_MGMT_RESP fc_bsg_job
+ **/
+static int
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	struct ulp_bde64 *bpl;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
+	int bpl_entries;
+	uint32_t tag = mgmt_resp->tag;
+	unsigned long reqbfrcnt =
+			(unsigned long)job->request_payload.payload_len;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
+		rc = -ERANGE;
+		goto send_mgmt_rsp_exit;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_exit;
+	}
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_free_bmp;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &bpl_entries);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_free_bmp;
+	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
+
+	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
+
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	rc = -EACCES;
+
+	lpfc_free_bsg_buffers(phba, cmp);
+
+send_mgmt_rsp_free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
+send_mgmt_rsp_exit:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for preparing the driver for a diag
+ * loopback run on the device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
+	struct lpfc_sli *psli;
+	struct lpfc_queue *qp = NULL;
+	struct lpfc_sli_ring *pring;
+	int i = 0;
+
+	psli = &phba->sli;
+	if (!psli)
+		return -ENODEV;
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		return -EACCES;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_block_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_block_requests(shost);
+	}
+
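+	/* wait for the outstanding FCP I/Os on the txcmplq(s) to drain */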
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		pring = &psli->sli3_ring[LPFC_FCP_RING];
+		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
+		return 0;
+	}
+	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+		pring = qp->pring;
+		if (!pring || (pring->ringno != LPFC_FCP_RING))
+			continue;
+		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+				      &pring->ring_lock))
+			break;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit the device from diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for the driver exit processing when leaving
+ * diag loopback mode on the device, unblocking SCSI requests on all vports.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_unblock_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_unblock_requests(shost);
+	}
+	return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ * All new scsi requests are blocked, a small delay is used to allow the
+ * scsi requests to complete, then the link is brought down. If the link
+ * is placed in loopback mode then scsi requests are again allowed
+ * so the scsi mid-layer doesn't give up on the port.
+ * All of this is done in-line.
+ */
+static int
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct diag_mode_set *loopback_mode;
+	uint32_t link_flags;
+	uint32_t timeout;
+	LPFC_MBOXQ_t *pmboxq  = NULL;
+	int mbxstatus = MBX_SUCCESS;
+	int i = 0;
+	int rc = 0;
+
+	/* no data to return just the return code */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct diag_mode_set)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2738 Received DIAG MODE request size:%d "
+				"below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct diag_mode_set)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba);
+	if (rc)
+		goto job_error;
+
+	/* bring the link to diagnostic mode */
+	loopback_mode = (struct diag_mode_set *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout * 100;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto loopback_mode_exit;
+	}
+	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
+		/* wait for link down before proceeding */
+		i = 0;
+		while (phba->link_state != LPFC_LINK_DOWN) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				goto loopback_mode_exit;
+			}
+			msleep(10);
+		}
+
+		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		if (link_flags == INTERNAL_LOOP_BACK)
+			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+		else
+			pmboxq->u.mb.un.varInitLnk.link_flags =
+				FLAGS_TOPOLOGY_MODE_LOOP;
+
+		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
+		pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+						     LPFC_MBOX_TMO);
+
+		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+			rc = -ENODEV;
+		else {
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag |= LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
+			/* wait for the link attention interrupt */
+			msleep(100);
+
+			i = 0;
+			while (phba->link_state != LPFC_HBA_READY) {
+				if (i++ > timeout) {
+					rc = -ETIMEDOUT;
+					break;
+				}
+
+				msleep(10);
+			}
+		}
+
+	} else
+		rc = -ENODEV;
+
+loopback_mode_exit:
+	lpfc_bsg_diag_mode_exit(phba);
+
+	/*
+	 * Let SLI layer release mboxq if mbox command completed after timeout.
+	 */
+	if (pmboxq && mbxstatus != MBX_TIMEOUT)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag state (1) or normal operation state (0).
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	struct lpfc_mbx_set_link_diag_state *link_diag_state;
+	uint32_t req_len, alloc_len;
+	int mbxstatus = MBX_SUCCESS, rc;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+				req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		rc = -ENOMEM;
+		goto link_diag_state_set_out;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+			diag, phba->sli4_hba.lnk_info.lnk_tp,
+			phba->sli4_hba.lnk_info.lnk_no);
+
+	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
+	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
+	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+	       phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+	       phba->sli4_hba.lnk_info.lnk_tp);
+	if (diag)
+		bf_set(lpfc_mbx_set_diag_state_diag,
+		       &link_diag_state->u.req, 1);
+	else
+		bf_set(lpfc_mbx_set_diag_state_diag,
+		       &link_diag_state->u.req, 0);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+		rc = 0;
+	else
+		rc = -ENODEV;
+
+link_diag_state_set_out:
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing a sli4 mailbox command for setting
+ * up internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	uint32_t req_len, alloc_len;
+	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+	int mbxstatus = MBX_SUCCESS, rc = 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+				req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+	bf_set(lpfc_mbx_set_diag_state_link_num,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_set_diag_state_link_type,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3127 Failed setup loopback mode mailbox "
+				"command, rc:x%x, status:x%x\n", mbxstatus,
+				pmboxq->u.mb.mbxStatus);
+		rc = -ENODEV;
+	}
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function sets up the SLI4 FC port registrations for a diagnostic run,
+ * which includes all the rpis, the vfi, and also the vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+	int rc;
+
+	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3136 Port still had vfi registered: "
+				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+				phba->pport->fc_myDID, phba->fcf.fcfi,
+				phba->sli4_hba.vfi_ids[phba->pport->vfi],
+				phba->vpi_ids[phba->pport->vpi]);
+		return -EINVAL;
+	}
+	rc = lpfc_issue_reg_vfi(phba->pport);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct diag_mode_set *loopback_mode;
+	uint32_t link_flags, timeout;
+	int i, rc = 0;
+
+	/* no data to return just the return code */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct diag_mode_set)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3011 Received DIAG MODE request size:%d "
+				"below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct diag_mode_set)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba);
+	if (rc)
+		goto job_error;
+
+	/* indicate we are in loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag |= LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* reset port to start from scratch */
+	rc = lpfc_selective_reset(phba);
+	if (rc)
+		goto job_error;
+
+	/* bring the link to diagnostic mode */
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3129 Bring link to diagnostic state.\n");
+	loopback_mode = (struct diag_mode_set *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3130 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
+		goto loopback_mode_exit;
+	}
+
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3131 Timeout waiting for link to "
+					"diagnostic mode, timeout:%d ms\n",
+					timeout * 10);
+			goto loopback_mode_exit;
+		}
+		msleep(10);
+	}
+
+	/* set up loopback mode */
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3132 Set up loopback mode:x%x\n", link_flags);
+
+	if (link_flags == INTERNAL_LOOP_BACK)
+		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+	else if (link_flags == EXTERNAL_LOOP_BACK)
+		rc = lpfc_hba_init_link_fc_topology(phba,
+						    FLAGS_TOPOLOGY_MODE_PT_PT,
+						    MBX_NOWAIT);
+	else {
+		rc = -EINVAL;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3141 Loopback mode:x%x not supported\n",
+				link_flags);
+		goto loopback_mode_exit;
+	}
+
+	if (!rc) {
+		/* wait for the link attention interrupt */
+		msleep(100);
+		i = 0;
+		while (phba->link_state < LPFC_LINK_UP) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3137 Timeout waiting for link up "
+					"in loopback mode, timeout:%d ms\n",
+					timeout * 10);
+				break;
+			}
+			msleep(10);
+		}
+	}
+
+	/* port resource registration setup for loopback diagnostic */
+	if (!rc) {
+		/* set up a non-zero myDID for the loopback test */
+		phba->pport->fc_myDID = 1;
+		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+	} else
+		goto loopback_mode_exit;
+
+	if (!rc) {
+		/* wait for the port ready */
+		msleep(100);
+		i = 0;
+		while (phba->link_state != LPFC_HBA_READY) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3133 Timeout waiting for port "
+					"loopback mode ready, timeout:%d ms\n",
+					timeout * 10);
+				break;
+			}
+			msleep(10);
+		}
+	}
+
+loopback_mode_exit:
+	/* clear loopback diagnostic mode */
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->link_flag &= ~LS_LOOPBACK_MODE;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	int rc;
+
+	shost = fc_bsg_to_shost(job);
+	if (!shost)
+		return -ENODEV;
+	vport = shost_priv(shost);
+	if (!vport)
+		return -ENODEV;
+	phba = vport->phba;
+	if (!phba)
+		return -ENODEV;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+		 LPFC_SLI_INTF_IF_TYPE_2)
+		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+	else
+		rc = -ENODEV;
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	struct diag_mode_set *loopback_mode_end_cmd;
+	uint32_t timeout;
+	int rc, i;
+
+	shost = fc_bsg_to_shost(job);
+	if (!shost)
+		return -ENODEV;
+	vport = shost_priv(shost);
+	if (!vport)
+		return -ENODEV;
+	phba = vport->phba;
+	if (!phba)
+		return -ENODEV;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return -ENODEV;
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		return -ENODEV;
+
+	/* clear loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag &= ~LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+	loopback_mode_end_cmd = (struct diag_mode_set *)
+			bsg_request->rqst_data.h_vendor.vendor_cmd;
+	timeout = loopback_mode_end_cmd->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3139 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
+		goto loopback_mode_end_exit;
+	}
+
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3140 Timeout waiting for link to "
+					"diagnostic mode_end, timeout:%d ms\n",
+					timeout * 10);
+			/* there is nothing much we can do here */
+			break;
+		}
+		msleep(10);
+	}
+
+	/* reset port resource registrations */
+	rc = lpfc_selective_reset(phba);
+	phba->pport->fc_myDID = 0;
+
+loopback_mode_end_exit:
+	/* make return code available to userspace */
+	bsg_reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs an SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmboxq;
+	struct sli4_link_diag *link_diag_test_cmd;
+	uint32_t req_len, alloc_len;
+	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct diag_status *diag_status_reply;
+	int mbxstatus, rc = 0;
+
+	shost = fc_bsg_to_shost(job);
+	if (!shost) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	vport = shost_priv(shost);
+	if (!vport) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	phba = vport->phba;
+	if (!phba) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct sli4_link_diag)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3013 Received LINK DIAG TEST request "
+				" size:%d below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct sli4_link_diag)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba);
+	if (rc)
+		goto job_error;
+
+	link_diag_test_cmd = (struct sli4_link_diag *)
+			 bsg_request->rqst_data.h_vendor.vendor_cmd;
+
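+	/* place the link in diagnostic state before running the test */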
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+	if (rc)
+		goto job_error;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		goto link_diag_test_exit;
+
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+				     req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len)
+		goto link_diag_test_exit;
+
+	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+	       phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+	       phba->sli4_hba.lnk_info.lnk_tp);
+	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->test_id);
+	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->loops);
+	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->test_version);
+	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->error_action);
+
+	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || mbxstatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3010 Run link diag test mailbox failed with "
+				"mbx_status x%x status x%x, add_status x%x\n",
+				mbxstatus, shdr_status, shdr_add_status);
+	}
+
+	diag_status_reply = (struct diag_status *)
+			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3012 Received Run link diag test reply "
+				"below minimum size (%d): reply_len:%d\n",
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct diag_status)),
+				job->reply_len);
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	diag_status_reply->mbox_status = mbxstatus;
+	diag_status_reply->shdr_status = shdr_status;
+	diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfcdiag_loop_self_reg - obtains a remote port login id
+ * @phba: Pointer to HBA context object
+ * @rpi: Pointer to a remote port login id
+ *
+ * This function obtains a remote port login id so the diag loopback test
+ * can send and receive its own unsolicited CT command.
+ **/
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *dmabuff;
+	int status;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
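+	/* on SLI4 an rpi must be allocated before the login can be registered */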
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	else {
+		*rpi = lpfc_sli4_alloc_rpi(phba);
+		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EBUSY;
+		}
+		status = lpfc_reg_rpi(phba, phba->pport->vpi,
+				phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	}
+
+	if (status) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
+		return -ENOMEM;
+	}
+
+	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
+	mbox->context1 = NULL;
+	mbox->context2 = NULL;
+	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+		kfree(dmabuff);
+		if (status != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
+		return -ENODEV;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		*rpi = mbox->u.mb.un.varWords[0];
+
+	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+	kfree(dmabuff);
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
+ * lpfcdiag_loop_self_unreg - unregisters the rpi
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ *
+ * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
+ **/
+static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
+{
+	LPFC_MBOXQ_t *mbox;
+	int status;
+
+	/* Allocate mboxq structure */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox == NULL)
+		return -ENOMEM;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_unreg_login(phba, 0, rpi, mbox);
+	else
+		lpfc_unreg_login(phba, phba->pport->vpi,
+				 phba->sli4_hba.rpi_ids[rpi], mbox);
+
+	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+		if (status != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	mempool_free(mbox, phba->mbox_mem_pool);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, rpi);
+	return 0;
+}
+
+/**
+ * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ * @txxri: Pointer to transmit exchange id
+ * @rxxri: Pointer to response exchange id
+ *
+ * This function obtains the transmit and receive ids required to send
+ * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
+ * values are used so that the unsolicited response handler is able to
+ * process the ct command sent on the same port.
+ **/
+static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
+			 uint16_t *txxri, uint16_t *rxxri)
+{
+	struct lpfc_bsg_event *evt;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	struct lpfc_dmabuf *dmabuf;
+	struct ulp_bde64 *bpl = NULL;
+	struct lpfc_sli_ct_request *ctreq = NULL;
+	int ret_val = 0;
+	int time_left;
+	int iocb_stat = IOCB_SUCCESS;
+	unsigned long flags;
+
+	*txxri = 0;
+	*rxxri = 0;
+	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+				SLI_CT_ELX_LOOPBACK);
+	if (!evt)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_add(&evt->node, &phba->ct_ev_waiters);
+	lpfc_bsg_event_ref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+
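+	/*
+	 * A single mbuf holds a one-entry BPL followed immediately by the
+	 * loopback CT request header that the BPL entry points at.
+	 */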
+	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (dmabuf) {
+		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
+		if (dmabuf->virt) {
+			INIT_LIST_HEAD(&dmabuf->list);
+			bpl = (struct ulp_bde64 *) dmabuf->virt;
+			memset(bpl, 0, sizeof(*bpl));
+			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+			bpl->addrHigh =
+				le32_to_cpu(putPaddrHigh(dmabuf->phys +
+					sizeof(*bpl)));
+			bpl->addrLow =
+				le32_to_cpu(putPaddrLow(dmabuf->phys +
+					sizeof(*bpl)));
+			bpl->tus.f.bdeFlags = 0;
+			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+			bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		}
+	}
+
+	if (cmdiocbq == NULL || rspiocbq == NULL ||
+	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
+		dmabuf->virt == NULL) {
+		ret_val = -ENOMEM;
+		goto err_get_xri_exit;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	rsp = &rspiocbq->iocb;
+
+	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+
+	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	ctreq->RevisionId.bits.InId = 0;
+	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+	ctreq->FsSubType = 0;
+	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
+	ctreq->CommandResponse.bits.Size = 0;
+
+
+	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
+	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
+	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
+
+	cmd->un.xseq64.w5.hcsw.Fctl = LA;
+	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = rpi;
+
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	cmdiocbq->iocb_cmpl = NULL;
+
+	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+				rspiocbq,
+				(phba->fc_ratov * 2)
+				+ LPFC_DRVR_TIMEOUT);
+	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
+		ret_val = -EIO;
+		goto err_get_xri_exit;
+	}
+	*txxri =  rsp->ulpContext;
+
+	evt->waiting = 1;
+	evt->wait_time_stamp = jiffies;
+	time_left = wait_event_interruptible_timeout(
+		evt->wq, !list_empty(&evt->events_to_see),
+		msecs_to_jiffies(1000 *
+			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+	if (list_empty(&evt->events_to_see))
+		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
+	else {
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		*rxxri = (list_entry(evt->events_to_get.prev,
+				     typeof(struct event_data),
+				     node))->immed_dat;
+	}
+	evt->waiting = 0;
+
+err_get_xri_exit:
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt); /* release ref */
+	lpfc_bsg_event_unref(evt); /* delete */
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if (dmabuf) {
+		if (dmabuf->virt)
+			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+
+	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+	if (rspiocbq)
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+	return ret_val;
+}
+
+/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
+ * returns a pointer to its descriptor.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf;
+	struct pci_dev *pcidev = phba->pcidev;
+
+	/* allocate dma buffer struct */
+	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return NULL;
+
+	INIT_LIST_HEAD(&dmabuf->list);
+
+	/* now, allocate dma buffer */
+	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+					   &(dmabuf->phys), GFP_KERNEL);
+
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return NULL;
+	}
+
+	return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine simply frees a dma buffer and its associated buffer
+ * descriptor referred by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+	struct pci_dev *pcidev = phba->pcidev;
+
+	if (!dmabuf)
+		return;
+
+	if (dmabuf->virt)
+		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine simply frees all dma buffers and their associated buffer
+ * descriptors referred by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+			    struct list_head *dmabuf_list)
+{
+	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+	if (list_empty(dmabuf_list))
+		return;
+
+	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+		list_del_init(&dmabuf->list);
+		lpfc_bsg_dma_page_free(phba, dmabuf);
+	}
+	return;
+}
+
+/**
+ * diag_cmd_data_alloc - fills in a bde struct with dma buffers
+ * @phba: Pointer to HBA context object
+ * @bpl: Pointer to 64 bit bde structure
+ * @size: Number of bytes to process
+ * @nocopydata: Flag to skip zeroing buffers that already hold caller data
+ *
+ * This function allocates page size buffers and populates an lpfc_dmabufext
+ * chain. Unless @nocopydata is set, each buffer is zeroed and marked as an
+ * input BDE. The chained list of page size buffers is returned.
+ **/
+static struct lpfc_dmabufext *
+diag_cmd_data_alloc(struct lpfc_hba *phba,
+		   struct ulp_bde64 *bpl, uint32_t size,
+		   int nocopydata)
+{
+	struct lpfc_dmabufext *mlist = NULL;
+	struct lpfc_dmabufext *dmp;
+	int cnt, offset = 0, i = 0;
+	struct pci_dev *pcidev;
+
+	pcidev = phba->pcidev;
+
+	while (size) {
+		/* We get chunks of 4K */
+		if (size > BUF_SZ_4K)
+			cnt = BUF_SZ_4K;
+		else
+			cnt = size;
+
+		/* allocate struct lpfc_dmabufext buffer header */
+		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
+		if (!dmp)
+			goto out;
+
+		INIT_LIST_HEAD(&dmp->dma.list);
+
+		/* Queue it to a linked list */
+		if (mlist)
+			list_add_tail(&dmp->dma.list, &mlist->dma.list);
+		else
+			mlist = dmp;
+
+		/* allocate buffer */
+		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
+						   cnt,
+						   &(dmp->dma.phys),
+						   GFP_KERNEL);
+
+		if (!dmp->dma.virt)
+			goto out;
+
+		dmp->size = cnt;
+
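+		/*
+		 * If the caller's data is already in place, just sync it for
+		 * the device; otherwise zero the buffer and mark the BDE as
+		 * an input buffer.
+		 */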
+		if (nocopydata) {
+			bpl->tus.f.bdeFlags = 0;
+			pci_dma_sync_single_for_device(phba->pcidev,
+				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+		} else {
+			memset((uint8_t *)dmp->dma.virt, 0, cnt);
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		}
+
+		/* build buffer ptr list for IOCB */
+		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
+		bpl->tus.f.bdeSize = (ushort) cnt;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+
+		i++;
+		offset += cnt;
+		size -= cnt;
+	}
+
+	if (mlist) {
+		mlist->flag = i;
+		return mlist;
+	}
+out:
+	diag_cmd_data_free(phba, mlist);
+	return NULL;
+}
+
+/**
+ * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * @phba: Pointer to HBA context object
+ * @rxxri: Receive exchange id
+ * @len: Number of data bytes
+ *
+ * This function allocates and posts a data buffer of sufficient size to
+ * receive an unsolicited CT command.
+ **/
+static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+			     size_t len)
+{
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *cmd = NULL;
+	struct list_head head, *curr, *next;
+	struct lpfc_dmabuf *rxbmp;
+	struct lpfc_dmabuf *dmp;
+	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
+	struct ulp_bde64 *rxbpl = NULL;
+	uint32_t num_bde;
+	struct lpfc_dmabufext *rxbuffer = NULL;
+	int ret_val = 0;
+	int iocb_stat;
+	int i = 0;
+
+	pring = lpfc_phba_elsring(phba);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (rxbmp != NULL) {
+		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+		if (rxbmp->virt) {
+			INIT_LIST_HEAD(&rxbmp->list);
+			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+		}
+	}
+
+	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
+		ret_val = -ENOMEM;
+		goto err_post_rxbufs_exit;
+	}
+
+	/* Queue buffers for the receive exchange */
+	num_bde = (uint32_t)rxbuffer->flag;
+	dmp = &rxbuffer->dma;
+
+	cmd = &cmdiocbq->iocb;
+	i = 0;
+
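+	/*
+	 * Without HBQs each CMD_QUE_XRI_BUF64_CX iocb can carry at most two
+	 * continuation BDEs, so receive buffers are posted in pairs through
+	 * mp[0]/mp[1].
+	 */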
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &dmp->list);
+	list_for_each_safe(curr, next, &head) {
+		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
+		list_del(curr);
+
+		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
+			cmd->un.quexri64cx.buff.bde.addrHigh =
+				putPaddrHigh(mp[i]->phys);
+			cmd->un.quexri64cx.buff.bde.addrLow =
+				putPaddrLow(mp[i]->phys);
+			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
+				((struct lpfc_dmabufext *)mp[i])->size;
+			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
+			cmd->ulpCommand = CMD_QUE_XRI64_CX;
+			cmd->ulpPU = 0;
+			cmd->ulpLe = 1;
+			cmd->ulpBdeCount = 1;
+			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
+
+		} else {
+			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
+			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
+			cmd->un.cont64[i].tus.f.bdeSize =
+				((struct lpfc_dmabufext *)mp[i])->size;
+			cmd->ulpBdeCount = ++i;
+
+			if ((--num_bde > 0) && (i < 2))
+				continue;
+
+			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
+			cmd->ulpLe = 1;
+		}
+
+		cmd->ulpClass = CLASS3;
+		cmd->ulpContext = rxxri;
+
+		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+						0);
+		if (iocb_stat == IOCB_ERROR) {
+			diag_cmd_data_free(phba,
+				(struct lpfc_dmabufext *)mp[0]);
+			if (mp[1])
+				diag_cmd_data_free(phba,
+					  (struct lpfc_dmabufext *)mp[1]);
+			dmp = list_entry(next, struct lpfc_dmabuf, list);
+			ret_val = -EIO;
+			goto err_post_rxbufs_exit;
+		}
+
+		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
+		if (mp[1]) {
+			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
+			mp[1] = NULL;
+		}
+
+		/* The iocb was freed by lpfc_sli_issue_iocb */
+		cmdiocbq = lpfc_sli_get_iocbq(phba);
+		if (!cmdiocbq) {
+			dmp = list_entry(next, struct lpfc_dmabuf, list);
+			ret_val = -EIO;
+			goto err_post_rxbufs_exit;
+		}
+
+		cmd = &cmdiocbq->iocb;
+		i = 0;
+	}
+	list_del(&head);
+
+err_post_rxbufs_exit:
+
+	if (rxbmp) {
+		if (rxbmp->virt)
+			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+		kfree(rxbmp);
+	}
+
+	if (cmdiocbq)
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+	return ret_val;
+}
+
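+/*
+ * Illustrative sketch, not part of the driver: a userspace tool would
+ * typically drive the loopback test below through the fc_host bsg node,
+ * roughly as follows (the exact vendor payload layout is an assumption
+ * inferred from the length checks in the function):
+ *
+ *	struct fc_bsg_request req = { 0 };
+ *	req.msgcode = FC_BSG_HST_VENDOR;
+ *	// first vendor command word selects LPFC_BSG_VENDOR_DIAG_TEST;
+ *	// request_payload carries the data to transmit, reply_payload
+ *	// (same length) receives the looped-back data.
+ */
+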
+/**
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a CT cmd to itself
+ * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
+ *
+ * This function receives a user data buffer to be transmitted and received on
+ * the same port; the link must be up and in loopback mode prior
+ * to being called.
+ * 1. A kernel buffer is allocated to copy the user data into.
+ * 2. The port registers with "itself".
+ * 3. The transmit and receive exchange ids are obtained.
+ * 4. The receive exchange id is posted.
+ * 5. A new els loopback event is created.
+ * 6. The command and response iocbs are allocated.
+ * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
+ *
+ * This function is meant to be called n times while the port is in loopback
+ * so it is the app's responsibility to issue a reset to take the port out
+ * of loopback mode.
+ **/
+static int
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_bsg_event *evt;
+	struct event_data *evdat;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t size;
+	uint32_t full_size;
+	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
+	uint16_t rpi = 0;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+	IOCB_t *cmd, *rsp = NULL;
+	struct lpfc_sli_ct_request *ctreq;
+	struct lpfc_dmabuf *txbmp;
+	struct ulp_bde64 *txbpl = NULL;
+	struct lpfc_dmabufext *txbuffer = NULL;
+	struct list_head head;
+	struct lpfc_dmabuf  *curr;
+	uint16_t txxri = 0, rxxri;
+	uint32_t num_bde;
+	uint8_t *ptr = NULL, *rx_databuf = NULL;
+	int rc = 0;
+	int time_left;
+	int iocb_stat = IOCB_SUCCESS;
+	unsigned long flags;
+	void *dataout = NULL;
+	uint32_t total_mem;
+
+	/* in case no data is returned, return just the return code */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2739 Received DIAG TEST request below minimum "
+				"size\n");
+		rc = -EINVAL;
+		goto loopback_test_exit;
+	}
+
+	if (job->request_payload.payload_len !=
+		job->reply_payload.payload_len) {
+		rc = -EINVAL;
+		goto loopback_test_exit;
+	}
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = -EACCES;
+		goto loopback_test_exit;
+	}
+
+	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
+		rc = -EACCES;
+		goto loopback_test_exit;
+	}
+
+	size = job->request_payload.payload_len;
+	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
+
+	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
+		rc = -ERANGE;
+		goto loopback_test_exit;
+	}
+
+	if (full_size >= BUF_SZ_4K) {
+		/*
+		 * Allocate memory for ioctl data. If the buffer is bigger than
+		 * 64k, allocate 64k and re-use that buffer over and over to
+		 * xfer the whole block. This is because the Linux kernel has a
+		 * problem allocating more than 120k of kernel space memory. Saw
+		 * problem with GET_FCPTARGETMAPPING...
+		 */
+		if (size <= (64 * 1024))
+			total_mem = full_size;
+		else
+			total_mem = 64 * 1024;
+	} else
+		/* Allocate memory for ioctl data */
+		total_mem = BUF_SZ_4K;
+
+	dataout = kmalloc(total_mem, GFP_KERNEL);
+	if (dataout == NULL) {
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	ptr = dataout;
+	ptr += ELX_LOOPBACK_HEADER_SZ;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+				job->request_payload.sg_cnt,
+				ptr, size);
+	rc = lpfcdiag_loop_self_reg(phba, &rpi);
+	if (rc)
+		goto loopback_test_exit;
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
+
+		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
+	}
+	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+				SLI_CT_ELX_LOOPBACK);
+	if (!evt) {
+		lpfcdiag_loop_self_unreg(phba, rpi);
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_add(&evt->node, &phba->ct_ev_waiters);
+	lpfc_bsg_event_ref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rspiocbq = lpfc_sli_get_iocbq(phba);
+	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+	if (txbmp) {
+		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
+		if (txbmp->virt) {
+			INIT_LIST_HEAD(&txbmp->list);
+			txbpl = (struct ulp_bde64 *) txbmp->virt;
+			txbuffer = diag_cmd_data_alloc(phba,
+							txbpl, full_size, 0);
+		}
+	}
+
+	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rsp = &rspiocbq->iocb;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &txbuffer->dma.list);
+	list_for_each_entry(curr, &head, list) {
+		segment_len = ((struct lpfc_dmabufext *)curr)->size;
+		if (current_offset == 0) {
+			ctreq = curr->virt;
+			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+			ctreq->RevisionId.bits.InId = 0;
+			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+			ctreq->FsSubType = 0;
+			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
+			ctreq->CommandResponse.bits.Size   = size;
+			segment_offset = ELX_LOOPBACK_HEADER_SZ;
+		} else
+			segment_offset = 0;
+
+		BUG_ON(segment_offset >= segment_len);
+		memcpy(curr->virt + segment_offset,
+			ptr + current_offset,
+			segment_len - segment_offset);
+
+		current_offset += segment_len - segment_offset;
+		BUG_ON(current_offset > size);
+	}
+	list_del(&head);
+
+	/* Build the XMIT_SEQUENCE iocb */
+	num_bde = (uint32_t)txbuffer->flag;
+
+	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
+	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
+	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
+
+	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		cmd->ulpContext = txxri;
+	} else {
+		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+		cmdiocbq->context3 = txbmp;
+		cmdiocbq->sli4_xritag = NO_XRI;
+		cmd->unsli3.rcvsli3.ox_id = 0xffff;
+	}
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
+	cmdiocbq->vport = phba->pport;
+	cmdiocbq->iocb_cmpl = NULL;
+	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+					     rspiocbq, (phba->fc_ratov * 2) +
+					     LPFC_DRVR_TIMEOUT);
+
+	if ((iocb_stat != IOCB_SUCCESS) ||
+	    ((phba->sli_rev < LPFC_SLI_REV4) &&
+	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3126 Failed loopback test issue iocb: "
+				"iocb_stat:x%x\n", iocb_stat);
+		rc = -EIO;
+		goto err_loopback_test_exit;
+	}
+
+	evt->waiting = 1;
+	time_left = wait_event_interruptible_timeout(
+		evt->wq, !list_empty(&evt->events_to_see),
+		msecs_to_jiffies(1000 *
+			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+	evt->waiting = 0;
+	if (list_empty(&evt->events_to_see)) {
+		rc = (time_left) ? -EINTR : -ETIMEDOUT;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3125 Not receiving unsolicited event, "
+				"rc:x%x\n", rc);
+	} else {
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		evdat = list_entry(evt->events_to_get.prev,
+				   typeof(*evdat), node);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		rx_databuf = evdat->data;
+		if (evdat->len != full_size) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1603 Loopback test did not receive expected "
+				"data length. actual length 0x%x expected "
+				"length 0x%x\n",
+				evdat->len, full_size);
+			rc = -EIO;
+		} else if (rx_databuf == NULL)
+			rc = -EIO;
+		else {
+			rc = IOCB_SUCCESS;
+			/* skip over elx loopback header */
+			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
+			bsg_reply->reply_payload_rcv_len =
+				sg_copy_from_buffer(job->reply_payload.sg_list,
+						    job->reply_payload.sg_cnt,
+						    rx_databuf, size);
+			bsg_reply->reply_payload_rcv_len = size;
+		}
+	}
+
+err_loopback_test_exit:
+	lpfcdiag_loop_self_unreg(phba, rpi);
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt); /* release ref */
+	lpfc_bsg_event_unref(evt); /* delete */
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+	if (rspiocbq != NULL)
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+
+	if (txbmp != NULL) {
+		if (txbpl != NULL) {
+			if (txbuffer != NULL)
+				diag_cmd_data_free(phba, txbuffer);
+			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
+		}
+		kfree(txbmp);
+	}
+
+loopback_test_exit:
+	kfree(dataout);
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	job->dd_data = NULL;
+	/* complete the job back to userspace if no error */
+	if (rc == IOCB_SUCCESS)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
+ * @job: GET_DFC_REV fc_bsg_job
+ **/
+static int
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct get_mgmt_rev_reply *event_reply;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2740 Received GET_DFC_REV request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_reply = (struct get_mgmt_rev_reply *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2741 Received GET_DFC_REV reply below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
+	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
+job_error:
+	bsg_reply->result = rc;
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox commands issued from the
+ * lpfc_bsg_issue_mbox function. It is called by the mailbox event handler
+ * with no lock held. Using the job tracking structure pointed to by
+ * context1 of the mailbox, it copies the mailbox data back to the BSG job
+ * and completes the job if it is still active.
+ **/
+static void
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_reply *bsg_reply;
+	struct bsg_job *job;
+	uint32_t size;
+	unsigned long flags;
+	uint8_t *pmb, *pmb_buf;
+
+	dd_data = pmboxq->context1;
+
+	/*
+	 * The outgoing buffer is readily referenced from the dma buffer;
+	 * we just need to get the header part from the mailboxq structure.
+	 */
+	pmb = (uint8_t *)&pmboxq->u.mb;
+	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+	/* Determine if job has been aborted */
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Copy the mailbox data to the job if it is still active */
+
+	if (job) {
+		bsg_reply = job->reply;
+		size = job->reply_payload.payload_len;
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    pmb_buf, size);
+	}
+
+	dd_data->set_job = NULL;
+	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
+	kfree(dd_data);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		bsg_reply->result = 0;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_check_cmd_access - test for a supported mailbox command
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a mailbox object.
+ * @vport: Pointer to a vport object.
+ *
+ * Some commands require the port to be offline, some may not be called from
+ * the application.
+ **/
+static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
+	MAILBOX_t *mb, struct lpfc_vport *vport)
+{
+	/* return negative error values for bsg job */
+	switch (mb->mbxCommand) {
+	/* Offline only */
+	case MBX_INIT_LINK:
+	case MBX_DOWN_LINK:
+	case MBX_CONFIG_LINK:
+	case MBX_CONFIG_RING:
+	case MBX_RESET_RING:
+	case MBX_UNREG_LOGIN:
+	case MBX_CLEAR_LA:
+	case MBX_DUMP_CONTEXT:
+	case MBX_RUN_DIAGS:
+	case MBX_RESTART:
+	case MBX_SET_MASK:
+		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2743 Command 0x%x is illegal in on-line "
+				"state\n",
+				mb->mbxCommand);
+			return -EPERM;
+		}
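+		/* fall through - allowed once the offline check passes */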
+	case MBX_WRITE_NV:
+	case MBX_WRITE_VPARMS:
+	case MBX_LOAD_SM:
+	case MBX_READ_NV:
+	case MBX_READ_CONFIG:
+	case MBX_READ_RCONFIG:
+	case MBX_READ_STATUS:
+	case MBX_READ_XRI:
+	case MBX_READ_REV:
+	case MBX_READ_LNK_STAT:
+	case MBX_DUMP_MEMORY:
+	case MBX_DOWN_LOAD:
+	case MBX_UPDATE_CFG:
+	case MBX_KILL_BOARD:
+	case MBX_READ_TOPOLOGY:
+	case MBX_LOAD_AREA:
+	case MBX_LOAD_EXP_ROM:
+	case MBX_BEACON:
+	case MBX_DEL_LD_ENTRY:
+	case MBX_SET_DEBUG:
+	case MBX_WRITE_WWN:
+	case MBX_SLI4_CONFIG:
+	case MBX_READ_EVENT_LOG:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_WRITE_EVENT_LOG:
+	case MBX_PORT_CAPABILITIES:
+	case MBX_PORT_IOV_CONTROL:
+	case MBX_RUN_BIU_DIAG64:
+		break;
+	case MBX_SET_VARIABLE:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1226 mbox: set_variable 0x%x, 0x%x\n",
+			mb->un.varWords[0],
+			mb->un.varWords[1]);
+		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+			&& (mb->un.varWords[1] == 1)) {
+			phba->wait_4_mlo_maint_flg = 1;
+		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
+			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
+		}
+		break;
+	case MBX_READ_SPARM64:
+	case MBX_REG_LOGIN:
+	case MBX_REG_LOGIN64:
+	case MBX_CONFIG_PORT:
+	case MBX_RUN_BIU_DIAG:
+	default:
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+			"2742 Unknown Command 0x%x\n",
+			mb->mbxCommand);
+		return -EPERM;
+	}
+
+	return 0; /* ok */
+}
+
+/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer
+ * mailbox command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+		return;
+
+	/* free all memory, including dma buffers */
+	lpfc_bsg_dma_page_list_free(phba,
+				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+	/* multi-buffer mailbox command pass-through session complete */
+	memset((char *)&phba->mbox_ext_buf_ctx, 0,
+	       sizeof(struct lpfc_mbox_ext_buf_ctx));
+	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles the BSG job for mailbox command completions with
+ * multiple external buffers.
+ **/
+static struct bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job_data *dd_data;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+	uint8_t *pmb, *pmb_buf;
+	unsigned long flags;
+	uint32_t size;
+	int rc = 0;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint8_t *pmbx;
+
+	dd_data = pmboxq->context1;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		bsg_reply = job->reply;
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/*
+	 * The outgoing buffer is readily referred from the dma buffer,
+	 * just need to get header part from mailboxq structure.
+	 */
+
+	pmb = (uint8_t *)&pmboxq->u.mb;
+	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	/* Copy the byte swapped response mailbox back to the user */
+	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+	/* if there is any non-embedded extended data copy that too */
+	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+		pmbx = (uint8_t *)dmabuf->virt;
+		/* byte swap the extended data following the mailbox command */
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+	}
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		size = job->reply_payload.payload_len;
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    pmb_buf, size);
+
+		/* result for success */
+		bsg_reply->result = 0;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2937 SLI_CONFIG ext-buffer mailbox command "
+				"(x%x/x%x) complete bsg job done, bsize:%d\n",
+				phba->mbox_ext_buf_ctx.nembType,
+				phba->mbox_ext_buf_ctx.mboxType, size);
+		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
+					phba->mbox_ext_buf_ctx.nembType,
+					phba->mbox_ext_buf_ctx.mboxType,
+					dma_ebuf, sta_pos_addr,
+					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
+	} else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2938 SLI_CONFIG ext-buffer mailbox "
+				"command (x%x/x%x) failure, rc:x%x\n",
+				phba->mbox_ext_buf_ctx.nembType,
+				phba->mbox_ext_buf_ctx.mboxType, rc);
+	}
+
+
+	/* state change */
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+	kfree(dd_data);
+	return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+	/* handle the BSG job with mailbox command */
+	if (!job)
+		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2939 SLI_CONFIG ext-buffer rd mailbox command "
+			"complete, ctxState:x%x, mbxStatus:x%x\n",
+			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+		lpfc_bsg_mbox_ext_session_reset(phba);
+
+	/* free base driver mailbox structure memory */
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	/* if the job is still active, call job done */
+	if (job) {
+		bsg_reply = job->reply;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+	/* handle the BSG job with the mailbox command */
+	if (!job)
+		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2940 SLI_CONFIG ext-buffer wr mailbox command "
+			"complete, ctxState:x%x, mbxStatus:x%x\n",
+			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+	/* free all memory, including dma buffers */
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_mbox_ext_session_reset(phba);
+
+	/* if the job is still active, call job done */
+	if (job) {
+		bsg_reply = job->reply;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+
+	return;
+}
+
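+/**
+ * lpfc_bsg_sli_cfg_dma_desc_setup - set up one external buffer dma descriptor
+ * @phba: Pointer to HBA context object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @index: Index of the MSE or HBD descriptor to set up.
+ * @mbx_dmabuf: Pointer to the mailbox command DMA buffer.
+ * @ext_dmabuf: Pointer to the external DMA buffer for this descriptor.
+ *
+ * Writes the physical address of the data buffer into the MSE (emb0) or
+ * HBD (emb1) descriptor array of the SLI_CONFIG mailbox command.
+ * Descriptor 0 points at the data area immediately following the mailbox
+ * header within @mbx_dmabuf; later descriptors point at @ext_dmabuf.
+ **/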
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+				struct lpfc_dmabuf *ext_dmabuf)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+	/* pointer to the start of mailbox command */
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+	if (nemb_tp == nemb_mse) {
+		if (index == 0) {
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_hi =
+				putPaddrHigh(mbx_dmabuf->phys +
+					     sizeof(MAILBOX_t));
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_lo =
+				putPaddrLow(mbx_dmabuf->phys +
+					    sizeof(MAILBOX_t));
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2943 SLI_CONFIG(mse)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+					index,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].buf_len,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_hi,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_lo);
+		} else {
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_hi =
+				putPaddrHigh(ext_dmabuf->phys);
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_lo =
+				putPaddrLow(ext_dmabuf->phys);
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2944 SLI_CONFIG(mse)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+					index,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].buf_len,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_hi,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_lo);
+		}
+	} else {
+		if (index == 0) {
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi =
+				putPaddrHigh(mbx_dmabuf->phys +
+					     sizeof(MAILBOX_t));
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo =
+				putPaddrLow(mbx_dmabuf->phys +
+					    sizeof(MAILBOX_t));
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3007 SLI_CONFIG(hbd)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+				index,
+				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+				&sli_cfg_mbx->un.
+				sli_config_emb1_subsys.hbd[index]),
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi,
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo);
+
+		} else {
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi =
+				putPaddrHigh(ext_dmabuf->phys);
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo =
+				putPaddrLow(ext_dmabuf->phys);
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3008 SLI_CONFIG(hbd)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+				index,
+				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+				&sli_cfg_mbx->un.
+				sli_config_emb1_subsys.hbd[index]),
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi,
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo);
+		}
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
+			      enum nemb_type nemb_tp,
+			      struct lpfc_dmabuf *dmabuf)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	struct dfc_mbox_req *mbox_req;
+	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+	uint32_t ext_buf_cnt, ext_buf_index;
+	struct lpfc_dmabuf *ext_dmabuf = NULL;
+	struct bsg_job_data *dd_data = NULL;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	MAILBOX_t *pmb;
+	uint8_t *pmbx;
+	int rc, i;
+
+	mbox_req =
+	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* pointer to the start of mailbox command */
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+	if (nemb_tp == nemb_mse) {
+		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2945 Handled SLI_CONFIG(mse) rd, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_MSE);
+			rc = -ERANGE;
+			goto job_error;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2941 Handled SLI_CONFIG(mse) rd, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	} else {
+		/* sanity check on interface type for support */
+		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+		    LPFC_SLI_INTF_IF_TYPE_2) {
+			rc = -ENODEV;
+			goto job_error;
+		}
+		/* nemb_tp == nemb_hbd */
+		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2946 Handled SLI_CONFIG(hbd) rd, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_HBD);
+			rc = -ERANGE;
+			goto job_error;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2942 Handled SLI_CONFIG(hbd) rd, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	}
+
+	/* before dma descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+					sta_pre_addr, dmabuf, ext_buf_cnt);
+
+	/* reject a non-embedded mailbox command with no external buffer */
+	if (ext_buf_cnt == 0) {
+		rc = -EPERM;
+		goto job_error;
+	} else if (ext_buf_cnt > 1) {
+		/* additional external read buffers */
+		for (i = 1; i < ext_buf_cnt; i++) {
+			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+			if (!ext_dmabuf) {
+				rc = -ENOMEM;
+				goto job_error;
+			}
+			list_add_tail(&ext_dmabuf->list,
+				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+		}
+	}
+
+	/* bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		rc = -ENOMEM;
+		goto job_error;
+	}
+
+	/* mailbox command structure for base driver */
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto job_error;
+	}
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	/* for the first external buffer */
+	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+	/* for the rest of external buffer descriptors if any */
+	if (ext_buf_cnt > 1) {
+		ext_buf_index = 1;
+		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+						ext_buf_index, dmabuf,
+						curr_dmabuf);
+			ext_buf_index++;
+		}
+	}
+
+	/* after dma descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+					sta_pos_addr, dmabuf, ext_buf_cnt);
+
+	/* construct base driver mbox command */
+	pmb = &pmboxq->u.mb;
+	pmbx = (uint8_t *)dmabuf->virt;
+	memcpy(pmb, pmbx, sizeof(*pmb));
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->vport = phba->pport;
+			cmd->ulpBdeCount = ++i;
+	/* multi-buffer handling context */
+	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+	/* callback for multi-buffer read mailbox command */
+	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+	/* context fields to callback function */
+	pmboxq->context1 = dd_data;
+	dd_data->type = TYPE_MBOX;
+	dd_data->set_job = job;
+	dd_data->context_un.mbox.pmboxq = pmboxq;
+	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+	job->dd_data = dd_data;
+
+	/* state change */
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+	/*
+	 * Non-embedded mailbox subcommand data gets byte swapped here because
+	 * the lower level driver code only does the first 64 mailbox words.
+	 */
+	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+		(nemb_tp == nemb_mse))
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+				sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[0].buf_len);
+
+	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2947 Issued SLI_CONFIG ext-buffer "
+				"mailbox command, rc:x%x\n", rc);
+		return SLI_CONFIG_HANDLED;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"2948 Failed to issue SLI_CONFIG ext-buffer "
+			"mailbox command, rc:x%x\n", rc);
+	rc = -EPIPE;
+
+job_error:
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_list_free(phba,
+				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+	kfree(dd_data);
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
+			       enum nemb_type nemb_tp,
+			       struct lpfc_dmabuf *dmabuf)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct dfc_mbox_req *mbox_req;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint32_t ext_buf_cnt;
+	struct bsg_job_data *dd_data = NULL;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	MAILBOX_t *pmb;
+	uint8_t *mbx;
+	int rc = SLI_CONFIG_NOT_HANDLED, i;
+
+	mbox_req =
+	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* pointer to the start of mailbox command */
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+	if (nemb_tp == nemb_mse) {
+		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2953 Failed SLI_CONFIG(mse) wr, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_MSE);
+			return -ERANGE;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2949 Handled SLI_CONFIG(mse) wr, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	} else {
+		/* sanity check on interface type for support */
+		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+		    LPFC_SLI_INTF_IF_TYPE_2)
+			return -ENODEV;
+		/* nemb_tp == nemb_hbd */
+		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2954 Failed SLI_CONFIG(hbd) wr, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_HBD);
+			return -ERANGE;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2950 Handled SLI_CONFIG(hbd) wr, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	}
+
+	/* before dma buffer descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+					sta_pre_addr, dmabuf, ext_buf_cnt);
+
+	if (ext_buf_cnt == 0)
+		return -EPERM;
+
+	/* for the first external buffer */
+	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+	/* after dma descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+					sta_pos_addr, dmabuf, ext_buf_cnt);
+
+	/* log the lengths of the additional external buffers */
+	for (i = 1; i < ext_buf_cnt; i++) {
+		if (nemb_tp == nemb_mse)
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[i].buf_len);
+		else
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+				&sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[i]));
+	}
+
+	/* multi-buffer handling context */
+	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+	if (ext_buf_cnt == 1) {
+		/* bsg tracking structure */
+		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+		if (!dd_data) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+
+		/* mailbox command structure for base driver */
+		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!pmboxq) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		pmb = &pmboxq->u.mb;
+		mbx = (uint8_t *)dmabuf->virt;
+		memcpy(pmb, mbx, sizeof(*pmb));
+		pmb->mbxOwner = OWN_HOST;
+		pmboxq->vport = phba->pport;
+
+		/* callback for multi-buffer write mailbox command */
+		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+		/* context fields to callback function */
+		pmboxq->context1 = dd_data;
+		dd_data->type = TYPE_MBOX;
+		dd_data->set_job = job;
+		dd_data->context_un.mbox.pmboxq = pmboxq;
+		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+		job->dd_data = dd_data;
+
+		/* state change */
+
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2955 Issued SLI_CONFIG ext-buffer "
+					"mailbox command, rc:x%x\n", rc);
+			return SLI_CONFIG_HANDLED;
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2956 Failed to issue SLI_CONFIG ext-buffer "
+				"mailbox command, rc:x%x\n", rc);
+		rc = -EPIPE;
+		goto job_error;
+	}
+
+	/* wait for additional external buffers */
+
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return SLI_CONFIG_HANDLED;
+
+job_error:
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	kfree(dd_data);
+
+	return rc;
+}
+
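+/*
+ * Dispatch summary, derived from the switches in the function below: for
+ * emb0 (MSE), FCOE READ_FCF/GET_DPORT_RESULTS and the COMN GET_* opcodes
+ * take the read path while FCOE ADD_FCF/SET_DPORT_MODE/LINK_DIAG_STATE
+ * take the write path; for emb1 (HBD), COMN READ_OBJECT(_LIST) reads and
+ * COMN WRITE_OBJECT writes. Anything else is rejected (-EPERM) or left
+ * as SLI_CONFIG_NOT_HANDLED.
+ */
+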
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the SLI_CONFIG (0x9B) mailbox command with non-embedded
+ * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
+ * with embedded subsystem 0x1 and opcodes with external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
+			     struct lpfc_dmabuf *dmabuf)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint32_t subsys;
+	uint32_t opcode;
+	int rc = SLI_CONFIG_NOT_HANDLED;
+
+	/* state change on new multi-buffer pass-through mailbox command */
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
+		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
+		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+			switch (opcode) {
+			case FCOE_OPCODE_READ_FCF:
+			case FCOE_OPCODE_GET_DPORT_RESULTS:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2957 Handled SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			case FCOE_OPCODE_ADD_FCF:
+			case FCOE_OPCODE_SET_DPORT_MODE:
+			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2958 Handled SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2959 Reject SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = -EPERM;
+				break;
+			}
+		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+			switch (opcode) {
+			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
+			case COMN_OPCODE_GET_PROFILE_CONFIG:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"3106 Handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"3107 Reject SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = -EPERM;
+				break;
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2977 Reject SLI_CONFIG "
+					"subsys:x%d, opcode:x%x\n",
+					subsys, opcode);
+			rc = -EPERM;
+		}
+	} else {
+		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
+		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
+		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+			switch (opcode) {
+			case COMN_OPCODE_READ_OBJECT:
+			case COMN_OPCODE_READ_OBJECT_LIST:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2960 Handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_hbd, dmabuf);
+				break;
+			case COMN_OPCODE_WRITE_OBJECT:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2961 Handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+							nemb_hbd, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2962 Not handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = SLI_CONFIG_NOT_HANDLED;
+				break;
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2978 Not handled SLI_CONFIG "
+					"subsys:x%d, opcode:x%x\n",
+					subsys, opcode);
+			rc = SLI_CONFIG_NOT_HANDLED;
+		}
+	}
+
+	/* state reset on not handled new multi-buffer mailbox command */
+	if (rc != SLI_CONFIG_HANDLED)
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine requests the abort of a pass-through mailbox command with
+ * multiple external buffers due to an error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+	else
+		lpfc_bsg_mbox_ext_session_reset(phba);
+	return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ *
+ * This routine extracts the next mailbox read external buffer back to
+ * user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
+{
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	struct lpfc_dmabuf *dmabuf;
+	uint8_t *pbuf;
+	uint32_t size;
+	uint32_t index;
+
+	index = phba->mbox_ext_buf_ctx.seqNum;
+	phba->mbox_ext_buf_ctx.seqNum++;
+
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2963 SLI_CONFIG (mse) ext-buffer rd get "
+				"buffer[%d], size:%d\n", index, size);
+	} else {
+		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
+				"buffer[%d], size:%d\n", index, size);
+	}
+	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+		return -EPIPE;
+	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+				  struct lpfc_dmabuf, list);
+	list_del_init(&dmabuf->list);
+
+	/* after dma buffer descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+					mbox_rd, dma_ebuf, sta_pos_addr,
+					dmabuf, index);
+
+	pbuf = (uint8_t *)dmabuf->virt;
+	bsg_reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    pbuf, size);
+
+	lpfc_bsg_dma_page_free(phba, dmabuf);
+
+	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+				"command session done\n");
+		lpfc_bsg_mbox_ext_session_reset(phba);
+	}
+
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+
+	return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
+			struct lpfc_dmabuf *dmabuf)
+{
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct bsg_job_data *dd_data = NULL;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	MAILBOX_t *pmb;
+	enum nemb_type nemb_tp;
+	uint8_t *pbuf;
+	uint32_t size;
+	uint32_t index;
+	int rc;
+
+	index = phba->mbox_ext_buf_ctx.seqNum;
+	phba->mbox_ext_buf_ctx.seqNum++;
+	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+	pbuf = (uint8_t *)dmabuf->virt;
+	size = job->request_payload.payload_len;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  pbuf, size);
+
+	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2966 SLI_CONFIG (mse) ext-buffer wr set "
+				"buffer[%d], size:%d\n",
+				phba->mbox_ext_buf_ctx.seqNum, size);
+
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
+				"buffer[%d], size:%d\n",
+				phba->mbox_ext_buf_ctx.seqNum, size);
+
+	}
+
+	/* set up external buffer descriptor and add to external buffer list */
+	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+					phba->mbox_ext_buf_ctx.mbx_dmabuf,
+					dmabuf);
+	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	/* after write dma buffer */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+					mbox_wr, dma_ebuf, sta_pos_addr,
+					dmabuf, index);
+
+	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2968 SLI_CONFIG ext-buffer wr all %d "
+				"ebuffers received\n",
+				phba->mbox_ext_buf_ctx.numBuf);
+
+		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+		if (!dd_data) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+
+		/* mailbox command structure for base driver */
+		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!pmboxq) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+		pmb = &pmboxq->u.mb;
+		memcpy(pmb, pbuf, sizeof(*pmb));
+		pmb->mbxOwner = OWN_HOST;
+		pmboxq->vport = phba->pport;
+
+		/* callback for multi-buffer write mailbox command */
+		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+		/* context fields to callback function */
+		pmboxq->context1 = dd_data;
+		dd_data->type = TYPE_MBOX;
+		dd_data->set_job = job;
+		dd_data->context_un.mbox.pmboxq = pmboxq;
+		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+		job->dd_data = dd_data;
+
+		/* state change */
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2969 Issued SLI_CONFIG ext-buffer "
+					"mailbox command, rc:x%x\n", rc);
+			return SLI_CONFIG_HANDLED;
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2970 Failed to issue SLI_CONFIG ext-buffer "
+				"mailbox command, rc:x%x\n", rc);
+		rc = -EPIPE;
+		goto job_error;
+	}
+
+	/* wait for additional external buffers */
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return SLI_CONFIG_HANDLED;
+
+job_error:
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_free(phba, dmabuf);
+	kfree(dd_data);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
+			     struct lpfc_dmabuf *dmabuf)
+{
+	int rc;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2971 SLI_CONFIG buffer (type:x%x)\n",
+			phba->mbox_ext_buf_ctx.mboxType);
+
+	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2972 SLI_CONFIG rd buffer state "
+					"mismatch:x%x\n",
+					phba->mbox_ext_buf_ctx.state);
+			lpfc_bsg_mbox_ext_abort(phba);
+			return -EPIPE;
+		}
+		rc = lpfc_bsg_read_ebuf_get(phba, job);
+		if (rc == SLI_CONFIG_HANDLED)
+			lpfc_bsg_dma_page_free(phba, dmabuf);
+	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2973 SLI_CONFIG wr buffer state "
+					"mismatch:x%x\n",
+					phba->mbox_ext_buf_ctx.state);
+			lpfc_bsg_mbox_ext_abort(phba);
+			return -EPIPE;
+		}
+		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+	}
+	return rc;
+}
+
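+/*
+ * Session sketch, derived from the checks in the function below: a
+ * multi-buffer pass-through is a sequence of bsg jobs sharing one tag:
+ *
+ *	job 1: extMboxTag = T, extSeqNum = 1  -> SLI_CONFIG mailbox command
+ *	job 2: extMboxTag = T, extSeqNum = 2  -> external buffer #2
+ *	...
+ *	job N: extMboxTag = T, extSeqNum = N  -> last external buffer
+ *
+ * Any tag mismatch or out-of-order sequence number is treated as a broken
+ * pipe: the session state is reset and -EPIPE returned.
+ */
+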
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
+			    struct lpfc_dmabuf *dmabuf)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct dfc_mbox_req *mbox_req;
+	int rc = SLI_CONFIG_NOT_HANDLED;
+
+	mbox_req =
+	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* mbox command with/without single external buffer */
+	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+		return rc;
+
+	/* mbox command and first external buffer */
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+		if (mbox_req->extSeqNum == 1) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2974 SLI_CONFIG mailbox: tag:%d, "
+					"seq:%d\n", mbox_req->extMboxTag,
+					mbox_req->extSeqNum);
+			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+			return rc;
+		} else
+			goto sli_cfg_ext_error;
+	}
+
+	/*
+	 * handle additional external buffers
+	 */
+
+	/* check broken pipe conditions */
+	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+		goto sli_cfg_ext_error;
+	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+		goto sli_cfg_ext_error;
+	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+		goto sli_cfg_ext_error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2975 SLI_CONFIG mailbox external buffer: "
+			"extSta:x%x, tag:%d, seq:%d\n",
+			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+			mbox_req->extSeqNum);
+	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+	return rc;
+
+sli_cfg_ext_error:
+	/* all other cases, broken pipe */
+	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"2976 SLI_CONFIG mailbox broken pipe: "
+			"ctxSta:x%x, ctxNumBuf:%d "
+			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+			phba->mbox_ext_buf_ctx.state,
+			phba->mbox_ext_buf_ctx.numBuf,
+			phba->mbox_ext_buf_ctx.mbxTag,
+			phba->mbox_ext_buf_ctx.seqNum,
+			mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+	lpfc_bsg_mbox_ext_session_reset(phba);
+
+	return -EPIPE;
+}
+
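+/*
+ * Illustrative sketch, not part of the driver: the vendor command words
+ * parsed in the function below hold a struct dfc_mbox_req describing the
+ * pass-through, while the request payload carries the raw MAILBOX_t image
+ * (copied into a DMA page, limited to BSG_MBOX_SIZE), e.g.
+ *
+ *	struct dfc_mbox_req mbreq = { 0 };
+ *	mbreq.inExtWLen  = in_words;   // extended input length, in words
+ *	mbreq.outExtWLen = out_words;  // extended output length, in words
+ *	mbreq.extMboxTag = 0;          // non-zero only for multi-buffer
+ *	mbreq.extSeqNum  = 0;          //   SLI_CONFIG sessions
+ */
+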
+/**
+ * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the bsg_job object.
+ * @vport: Pointer to a vport object.
+ *
+ * Allocate a tracking object, mailbox command memory, get a mailbox
+ * from the mailbox pool, copy the caller mailbox command.
+ *
+ * If offline and the sli is active we need to poll for the command (the port
+ * is being reset) and complete the job; otherwise issue the mailbox command
+ * and let our completion handler finish the command.
+ **/
+static int
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
+	struct lpfc_vport *vport)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+	/* a 4k buffer to hold the mb and extended data from/to the bsg */
+	uint8_t *pmbx = NULL;
+	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
+	struct lpfc_dmabuf *dmabuf = NULL;
+	struct dfc_mbox_req *mbox_req;
+	struct READ_EVENT_LOG_VAR *rdEventLog;
+	uint32_t transmit_length, receive_length, mode;
+	struct lpfc_mbx_sli4_config *sli4_config;
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+	struct ulp_bde64 *bde;
+	uint8_t *ext = NULL;
+	int rc = 0;
+	uint8_t *from;
+	uint32_t size;
+
+	/* in case no data is transferred */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	/* sanity check to protect driver */
+	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
+	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
+		rc = -ERANGE;
+		goto job_done;
+	}
+
+	/*
+	 * Don't allow mailbox commands to be sent when blocked or when in
+	 * the middle of discovery
+	 */
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+		rc = -EAGAIN;
+		goto job_done;
+	}
+
+	mbox_req =
+	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* check if requested extended data lengths are valid */
+	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
+	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
+		rc = -ERANGE;
+		goto job_done;
+	}
+
+	dmabuf = lpfc_bsg_dma_page_alloc(phba);
+	if (!dmabuf || !dmabuf->virt) {
+		rc = -ENOMEM;
+		goto job_done;
+	}
+
+	/* Get the mailbox command or external buffer from BSG */
+	pmbx = (uint8_t *)dmabuf->virt;
+	size = job->request_payload.payload_len;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, pmbx, size);
+
+	/* Handle possible SLI_CONFIG with non-embedded payloads */
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+		if (rc == SLI_CONFIG_HANDLED)
+			goto job_cont;
+		if (rc)
+			goto job_done;
+		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+	}
+
+	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+	if (rc != 0)
+		goto job_done; /* must be negative */
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2727 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto job_done;
+	}
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto job_done;
+	}
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	memcpy(pmb, pmbx, sizeof(*pmb));
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->vport = vport;
+
+	/* If HBA encountered an error attention, allow only DUMP
+	 * or RESTART mailbox commands until the HBA is restarted.
+	 */
+	if (phba->pport->stopped &&
+	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
+	    pmb->mbxCommand != MBX_RESTART &&
+	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
+	    pmb->mbxCommand != MBX_WRITE_WWN)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"2797 mbox: Issued mailbox cmd "
+				"0x%x while in stopped state.\n",
+				pmb->mbxCommand);
+
+	/* extended mailbox commands will need an extended buffer */
+	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
+		from = pmbx;
+		ext = from + sizeof(MAILBOX_t);
+		pmboxq->context2 = ext;
+		pmboxq->in_ext_byte_len =
+			mbox_req->inExtWLen * sizeof(uint32_t);
+		pmboxq->out_ext_byte_len =
+			mbox_req->outExtWLen * sizeof(uint32_t);
+		pmboxq->mbox_offset_word = mbox_req->mbOffset;
+	}
+
+	/* biu diag will need a kernel buffer to transfer the data;
+	 * allocate our own buffer and set up the mailbox command to
+	 * use ours
+	 */
+	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+		transmit_length = pmb->un.varWords[1];
+		receive_length = pmb->un.varWords[4];
+		/* transmit length cannot be greater than receive length or
+		 * mailbox extension size
+		 */
+		if ((transmit_length > receive_length) ||
+			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
+			rc = -ERANGE;
+			goto job_done;
+		}
+		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
+		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
+
+		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
+		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
+	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
+		rdEventLog = &pmb->un.varRdEventLog;
+		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+		mode = bf_get(lpfc_event_log, rdEventLog);
+
+		/* receive length cannot be greater than mailbox
+		 * extension size
+		 */
+		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
+			rc = -ERANGE;
+			goto job_done;
+		}
+
+		/* mode zero uses a bde like biu diags command */
+		if (mode == 0) {
+			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+							+ sizeof(MAILBOX_t));
+			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+							+ sizeof(MAILBOX_t));
+		}
+	} else if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Let type 4 (well known data) through because the data is
+		 * returned in varwords[4-8];
+		 * otherwise check the receive length and fetch the buffer addr
+		 */
+		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
+			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
+			/* rebuild the command for sli4 using our own buffers
+			 * like we do for biu diags
+			 */
+			receive_length = pmb->un.varWords[2];
+			/* a receive length of zero is invalid */
+			if (receive_length == 0) {
+				rc = -ERANGE;
+				goto job_done;
+			}
+			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
+			pmb->un.varUpdateCfg.co) {
+			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
+
+			/* bde size cannot be greater than mailbox ext size */
+			if (bde->tus.f.bdeSize >
+			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
+				rc = -ERANGE;
+				goto job_done;
+			}
+			bde->addrHigh = putPaddrHigh(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+			bde->addrLow = putPaddrLow(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
+			/* Handling non-embedded SLI_CONFIG mailbox command */
+			sli4_config = &pmboxq->u.mqe.un.sli4_config;
+			if (!bf_get(lpfc_mbox_hdr_emb,
+			    &sli4_config->header.cfg_mhdr)) {
+				/* rebuild the command for sli4 using our
+				 * own buffers like we do for biu diags
+				 */
+				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+						&pmb->un.varWords[0];
+				receive_length = nembed_sge->sge[0].length;
+
+				/* receive length cannot be greater than
+				 * mailbox extension size
+				 */
+				if ((receive_length == 0) ||
+				    (receive_length >
+				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
+					rc = -ERANGE;
+					goto job_done;
+				}
+
+				nembed_sge->sge[0].pa_hi =
+						putPaddrHigh(dmabuf->phys
+						   + sizeof(MAILBOX_t));
+				nembed_sge->sge[0].pa_lo =
+						putPaddrLow(dmabuf->phys
+						   + sizeof(MAILBOX_t));
+			}
+		}
+	}
+
+	dd_data->context_un.mbox.dmabuffers = dmabuf;
+
+	/* setup wake call as IOCB callback */
+	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
+
+	/* setup context field to pass wait_queue pointer to wake function */
+	pmboxq->context1 = dd_data;
+	dd_data->type = TYPE_MBOX;
+	dd_data->set_job = job;
+	dd_data->context_un.mbox.pmboxq = pmboxq;
+	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+	dd_data->context_un.mbox.ext = ext;
+	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
+	job->dd_data = dd_data;
+
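+	/* If the port is offline or SLI is not active, mailbox completion
+	 * interrupts are unavailable, so issue the command by polling.
+	 */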
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+			goto job_done;
+		}
+
+		/* job finished, copy the data */
+		memcpy(pmbx, pmb, sizeof(*pmb));
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    pmbx, size);
+		/* not waiting; mbox already done */
+		rc = 0;
+		goto job_done;
+	}
+
+	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+		return 1; /* job started */
+
+job_done:
+	/* common exit for error or job completed inline */
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_free(phba, dmabuf);
+	kfree(dd_data);
+
+job_cont:
+	return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
+ * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
+ *
+ * Returns zero when the job has completed or has been submitted
+ * successfully; otherwise a negative error code.
+ **/
+static int
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct dfc_mbox_req *mbox_req;
+	int rc = 0;
+
+	/* mix-and-match backward compatibility */
+	bsg_reply->reply_payload_rcv_len = 0;
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2737 Mix-and-match backward compatibility "
+				"between MBOX_REQ old size:%d and "
+				"new request size:%d\n",
+				(int)(job->request_len -
+				      sizeof(struct fc_bsg_request)),
+				(int)sizeof(struct dfc_mbox_req));
+		mbox_req = (struct dfc_mbox_req *)
+				bsg_request->rqst_data.h_vendor.vendor_cmd;
+		mbox_req->extMboxTag = 0;
+		mbox_req->extSeqNum = 0;
+	}
+
+	rc = lpfc_bsg_issue_mbox(phba, job, vport);
+
+	if (rc == 0) {
+		/* job done */
+		bsg_reply->result = 0;
+		job->dd_data = NULL;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	} else if (rc == 1) {
+		/* job submitted, will complete later */
+		rc = 0; /* return zero, no error */
+	} else {
+		/* some error occurred */
+		bsg_reply->result = rc;
+		job->dd_data = NULL;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_menlo_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the response data into the job's reply payload,
+ * releases the iocb and DMA resources, and completes the bsg job if it
+ * is still active.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp, *cmp, *rmp;
+	struct lpfc_bsg_menlo *menlo;
+	unsigned long flags;
+	struct menlo_response *menlo_resp;
+	unsigned int rsp_size;
+	int rc = 0;
+
+	dd_data = cmdiocbq->context1;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
+	menlo = &dd_data->context_un.menlo;
+	rmp = menlo->rmp;
+	rsp = &rspiocbq->iocb;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		bsg_reply = job->reply;
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Copy the job data or set the failing status for the job */
+
+	if (job) {
+		/* always return the xri; in the case of a menlo download
+		 * it allows the data to be sent as a continuation of the
+		 * exchange.
+		 */
+
+		menlo_resp = (struct menlo_response *)
+			bsg_reply->reply_data.vendor_reply.vendor_rsp;
+		menlo_resp->xri = rsp->ulpContext;
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
+				rc = -EACCES;
+			}
+		} else {
+			rsp_size = rsp->un.genreq64.bdl.bdeSize;
+			bsg_reply->reply_payload_rcv_len =
+				lpfc_bsg_copy_data(rmp, &job->reply_payload,
+						   rsp_size, 0);
+		}
+
+	}
+
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_free_bsg_buffers(phba, rmp);
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
+	kfree(dd_data);
+
+	/* Complete the job if active */
+
+	if (job) {
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR iocb for all menlo cmd requests;
+ * all command completions return the xri for the command.
+ * For menlo data requests a gen request 64 CX is used to continue the
+ * exchange supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *cmd;
+	int rc = 0;
+	struct menlo_command *menlo_cmd;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct bsg_job_data *dd_data;
+	struct ulp_bde64 *bpl = NULL;
+
+	/* in case no data is returned return just the return code */
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+		sizeof(struct menlo_command)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2784 Received MENLO_CMD request below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2785 Received MENLO_CMD reply below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2786 Adapter does not support menlo "
+				"commands\n");
+		rc = -EPERM;
+		goto no_dd_data;
+	}
+
+	menlo_cmd = (struct menlo_command *)
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2787 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_dd;
+	}
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+
+	bpl = (struct ulp_bde64 *)bmp->virt;
+	request_nseg = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &request_nseg);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
+
+	bpl += request_nseg;
+	reply_nseg = LPFC_BPL_SIZE / sizeof(struct ulp_bde64) - request_nseg;
+	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+				     bpl, &reply_nseg);
+	if (!rmp) {
+		rc = -ENOMEM;
+		goto free_cmp;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_rmp;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+	cmd->ulpBdeCount = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpOwner = OWN_CHIP;
+	cmd->ulpLe = 1; /* LE bit: last element of the BDE list */
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	/* We want the firmware to time out before we do */
+	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = cmp;
+	cmdiocbq->context3 = bmp;
+	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+		cmd->ulpPU = MENLO_PU; /* 3 */
+		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+	} else {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+		cmd->ulpPU = 1;
+		cmd->un.ulpWord[4] = 0;
+		cmd->ulpContext = menlo_cmd->xri;
+	}
+
+	dd_data->type = TYPE_MENLO;
+	dd_data->set_job = job;
+	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+	dd_data->context_un.menlo.rmp = rmp;
+	job->dd_data = dd_data;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+		MENLO_TIMEOUT - 5);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+free_rmp:
+	lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+	lpfc_free_bsg_buffers(phba, cmp);
+free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
+free_dd:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	bsg_reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
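+/**
+ * lpfc_forced_link_speed - check adapter support for forced link speed
+ * @job: fc_bsg_job for LPFC_BSG_VENDOR_FORCED_LINK_SPEED
+ **/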
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct forced_link_speed_support_reply *forced_reply;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct get_forced_link_speed_support)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"0048 Received FORCED_LINK_SPEED request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	forced_reply = (struct forced_link_speed_support_reply *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct forced_link_speed_support_reply)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"0049 Received FORCED_LINK_SPEED reply below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+	bsg_reply->result = rc;
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_hst_vendor(struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+	int rc;
+
+	switch (command) {
+	case LPFC_BSG_VENDOR_SET_CT_EVENT:
+		rc = lpfc_bsg_hba_set_event(job);
+		break;
+	case LPFC_BSG_VENDOR_GET_CT_EVENT:
+		rc = lpfc_bsg_hba_get_event(job);
+		break;
+	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
+		rc = lpfc_bsg_send_mgmt_rsp(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_MODE:
+		rc = lpfc_bsg_diag_loopback_mode(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_MODE_END:
+		rc = lpfc_sli4_bsg_diag_mode_end(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+		rc = lpfc_bsg_diag_loopback_run(job);
+		break;
+	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+		rc = lpfc_sli4_bsg_link_diag_test(job);
+		break;
+	case LPFC_BSG_VENDOR_GET_MGMT_REV:
+		rc = lpfc_bsg_get_dfc_rev(job);
+		break;
+	case LPFC_BSG_VENDOR_MBOX:
+		rc = lpfc_bsg_mbox_cmd(job);
+		break;
+	case LPFC_BSG_VENDOR_MENLO_CMD:
+	case LPFC_BSG_VENDOR_MENLO_DATA:
+		rc = lpfc_menlo_cmd(job);
+		break;
+	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+		rc = lpfc_forced_link_speed(job);
+		break;
+	default:
+		rc = -EINVAL;
+		bsg_reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		bsg_reply->result = rc;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_request - handle a bsg request from the FC transport
+ * @job: fc_bsg_job to handle
+ **/
+int
+lpfc_bsg_request(struct bsg_job *job)
+{
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t msgcode;
+	int rc;
+
+	msgcode = bsg_request->msgcode;
+	switch (msgcode) {
+	case FC_BSG_HST_VENDOR:
+		rc = lpfc_bsg_hst_vendor(job);
+		break;
+	case FC_BSG_RPT_ELS:
+		rc = lpfc_bsg_rport_els(job);
+		break;
+	case FC_BSG_RPT_CT:
+		rc = lpfc_bsg_send_mgmt_cmd(job);
+		break;
+	default:
+		rc = -EINVAL;
+		bsg_reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		bsg_reply->result = rc;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
+ * @job: fc_bsg_job that has timed out
+ *
+ * This function just aborts the job's IOCB.  The aborted IOCB will return to
+ * the waiting function which will handle passing the error back to userspace
+ **/
+int
+lpfc_bsg_timeout(struct bsg_job *job)
+{
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb;
+	struct lpfc_sli_ring *pring;
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
+	int rc = 0;
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *check_iocb, *next_iocb;
+
+	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return -EIO;
+
+	/* if the job's driver data is NULL, the command completed or is in
+	 * the process of completing.  In this case, return status to the
+	 * request so the timeout is retried.  This avoids double completion
+	 * issues and the request will be pulled off the timer queue when
+	 * the command's completion handler executes.  Otherwise, prevent
+	 * the command's completion handler from executing the job done
+	 * callback and continue processing to abort the outstanding
+	 * command.
+	 */
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = (struct bsg_job_data *)job->dd_data;
+	if (dd_data) {
+		dd_data->set_job = NULL;
+		job->dd_data = NULL;
+	} else {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return -EAGAIN;
+	}
+
+	switch (dd_data->type) {
+	case TYPE_IOCB:
+		/* Check to see if IOCB was issued to the port or not. If not,
+		 * remove it from the txq queue and call cancel iocbs.
+		 * Otherwise, call abort iotag
+		 */
+		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O abort window is still open */
+		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return -EAGAIN;
+		}
+		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+					 list) {
+			if (check_iocb == cmdiocb) {
+				list_move_tail(&check_iocb->list, &completions);
+				break;
+			}
+		}
+		if (list_empty(&completions))
+			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		if (!list_empty(&completions)) {
+			lpfc_sli_cancel_iocbs(phba, &completions,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_ABORTED);
+		}
+		break;
+
+	case TYPE_EVT:
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		break;
+
+	case TYPE_MBOX:
+		/* Update the ext buf ctx state if needed */
+
+		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		break;
+	case TYPE_MENLO:
+		/* Check to see if IOCB was issued to the port or not. If not,
+		 * remove it from the txq queue and call cancel iocbs.
+		 * Otherwise, call abort iotag.
+		 */
+		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
+		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+					 list) {
+			if (check_iocb == cmdiocb) {
+				list_move_tail(&check_iocb->list, &completions);
+				break;
+			}
+		}
+		if (list_empty(&completions))
+			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		if (!list_empty(&completions)) {
+			lpfc_sli_cancel_iocbs(phba, &completions,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_ABORTED);
+		}
+		break;
+	default:
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		break;
+	}
+
+	/* scsi transport fc's fc_bsg_job_timeout expects a zero return
+	 * code; otherwise an error message is displayed on the console,
+	 * so always return success (zero)
+	 */
+	return rc;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_bsg.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 0000000..e7d95a4
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,301 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2010-2015 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+/* bsg definitions
+ * No pointers to user data are allowed; all application buffers and sizes
+ * will be derived through the bsg interface.
+ *
+ * These are the vendor unique structures passed in using the bsg
+ * FC_BSG_HST_VENDOR message code type.
+ */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT		1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT		2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP		3
+#define LPFC_BSG_VENDOR_DIAG_MODE		4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK	5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV		6
+#define LPFC_BSG_VENDOR_MBOX			7
+#define LPFC_BSG_VENDOR_MENLO_CMD		8
+#define LPFC_BSG_VENDOR_MENLO_DATA		9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END		10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST		11
+#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED	14
+
+struct set_ct_event {
+	uint32_t command;
+	uint32_t type_mask;
+	uint32_t ev_req_id;
+	uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+	uint32_t command;
+	uint32_t ev_reg_id;
+	uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+	uint32_t immed_data;
+	uint32_t type;
+};
+
+struct send_mgmt_resp {
+	uint32_t command;
+	uint32_t tag;
+};
+
+#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
+#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
+
+struct diag_mode_set {
+	uint32_t command;
+	uint32_t type;
+	uint32_t timeout;
+};
+
+struct sli4_link_diag {
+	uint32_t command;
+	uint32_t timeout;
+	uint32_t test_id;
+	uint32_t loops;
+	uint32_t test_version;
+	uint32_t error_action;
+};
+
+struct diag_mode_test {
+	uint32_t command;
+};
+
+struct diag_status {
+	uint32_t mbox_status;
+	uint32_t shdr_status;
+	uint32_t shdr_add_status;
+};
+
+#define LPFC_WWNN_TYPE		0
+#define LPFC_WWPN_TYPE		1
+
+struct get_mgmt_rev {
+	uint32_t command;
+};
+
+#define MANAGEMENT_MAJOR_REV   1
+#define MANAGEMENT_MINOR_REV   1
+
+/* the MgmtRevInfo structure */
+struct MgmtRevInfo {
+	uint32_t a_Major;
+	uint32_t a_Minor;
+};
+
+struct get_mgmt_rev_reply {
+	struct MgmtRevInfo info;
+};
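+
+/*
+ * Illustrative sketch (not part of the driver) of how a management
+ * application might issue the simplest vendor command above,
+ * LPFC_BSG_VENDOR_GET_MGMT_REV, through the fc transport bsg node.
+ * The device path and the absence of error handling are assumptions
+ * made purely for illustration:
+ *
+ *	#include <fcntl.h>
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/bsg.h>
+ *	#include <scsi/sg.h>
+ *	#include <scsi/scsi_bsg_fc.h>
+ *
+ *	unsigned char rq[sizeof(struct fc_bsg_request) +
+ *			 sizeof(struct get_mgmt_rev)];
+ *	unsigned char rp[sizeof(struct fc_bsg_request) +
+ *			 sizeof(struct get_mgmt_rev_reply)];
+ *	struct fc_bsg_request *rqst = (struct fc_bsg_request *)rq;
+ *	struct sg_io_v4 io;
+ *	int fd = open("/dev/bsg/fc_host0", O_RDWR);
+ *
+ *	memset(rq, 0, sizeof(rq));
+ *	rqst->msgcode = FC_BSG_HST_VENDOR;
+ *	rqst->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
+ *
+ *	memset(&io, 0, sizeof(io));
+ *	io.guard = 'Q';
+ *	io.protocol = BSG_PROTOCOL_SCSI;
+ *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+ *	io.request = (uintptr_t)rq;
+ *	io.request_len = sizeof(rq);
+ *	io.response = (uintptr_t)rp;
+ *	io.max_response_len = sizeof(rp);
+ *	ioctl(fd, SG_IO, &io);
+ *
+ * On success the driver places the MgmtRevInfo words in the vendor
+ * reply area of the response buffer.
+ */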
+
+#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
+struct dfc_mbox_req {
+	uint32_t command;
+	uint32_t mbOffset;
+	uint32_t inExtWLen;
+	uint32_t outExtWLen;
+	uint32_t extMboxTag;
+	uint32_t extSeqNum;
+};
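+
+/*
+ * Layout sketch (an assumption for illustration, not an interface
+ * definition): for LPFC_BSG_VENDOR_MBOX the vendor command words carry
+ * the struct dfc_mbox_req, while the raw MAILBOX_t plus any extension
+ * data travel in the bsg data payload, which the driver copies into a
+ * DMA buffer of at most BSG_MBOX_SIZE bytes:
+ *
+ *	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
+ *			rqst->rqst_data.h_vendor.vendor_cmd;
+ *
+ *	mbox_req->command = LPFC_BSG_VENDOR_MBOX;
+ *	mbox_req->inExtWLen = 0;	(extension input, in 32-bit words)
+ *	mbox_req->outExtWLen = 0;	(extension output, in 32-bit words)
+ *	io.request_len = sizeof(struct fc_bsg_request) + sizeof(*mbox_req);
+ *	io.dout_xferp = (uintptr_t)mb;	(MAILBOX_t plus extension area)
+ *	io.dout_xfer_len = BSG_MBOX_SIZE;
+ *	io.din_xferp = (uintptr_t)mb;	(completed mailbox is copied back)
+ *	io.din_xfer_len = BSG_MBOX_SIZE;
+ */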
+
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+	uint32_t cmd;
+	uint32_t xri;
+};
+
+struct menlo_response {
+	uint32_t xri; /* return the xri of the iocb exchange */
+};
+
+/*
+ * Macros and data structures for handling sli-config mailbox command
+ * pass-through support.  This header file is shared between user and
+ * kernel spaces.  Note that the macros below duplicate those in
+ * lpfc_hw4.h, with names prefixed bsg_, because the macros defined in
+ * lpfc_hw4.h are not accessible from user space.
+ */
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * E.g. for a bit field that starts at bit 7 of the "field4" field of a
+ * structure and is 2 bits in size, the following #defines must exist:
+ *      struct temp {
+ *              uint32_t        field1;
+ *              uint32_t        field2;
+ *              uint32_t        field3;
+ *              uint32_t        field4;
+ *      #define example_bit_field_SHIFT         7
+ *      #define example_bit_field_MASK          0x03
+ *      #define example_bit_field_WORD          field4
+ *              uint32_t        field5;
+ *      };
+ * Then the macros below may be used to get or set the value of that field.
+ * E.g. to get the value of the bit field from the above example:
+ *      struct temp t1;
+ *      value = bsg_bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *      bsg_bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *      bsg_bf_set(example_bit_field, &t1, 0);
+ */
+#define bsg_bf_get_le32(name, ptr) \
+	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bsg_bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bsg_bf_set_le32(name, ptr, value) \
+	((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+	name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+	~(name##_MASK << name##_SHIFT)))))
+#define bsg_bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+	((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
+/*
+ * The sli_config structure specified here is based on the following
+ * restrictions:
+ *
+ * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without
+ *    carrying HBD.
+ * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or
+ *    without carrying HBDs.
+ */
+
+struct lpfc_sli_config_mse {
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+	uint32_t buf_len;
+#define lpfc_mbox_sli_config_mse_len_SHIFT	0
+#define lpfc_mbox_sli_config_mse_len_MASK	0xffffff
+#define lpfc_mbox_sli_config_mse_len_WORD	buf_len
+};
+
+struct lpfc_sli_config_hbd {
+	uint32_t buf_len;
+#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT	0
+#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK	0xffffff
+#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD	buf_len
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+};
+
+struct lpfc_sli_config_hdr {
+	uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT		0
+#define lpfc_mbox_hdr_emb_MASK		0x00000001
+#define lpfc_mbox_hdr_emb_WORD		word1
+#define lpfc_mbox_hdr_mse_cnt_SHIFT	3
+#define lpfc_mbox_hdr_mse_cnt_MASK	0x0000001f
+#define lpfc_mbox_hdr_mse_cnt_WORD	word1
+	uint32_t payload_length;
+	uint32_t tag_lo;
+	uint32_t tag_hi;
+	uint32_t reserved5;
+};
+
+struct lpfc_sli_config_emb0_subsys {
+	struct lpfc_sli_config_hdr	sli_config_hdr;
+#define LPFC_MBX_SLI_CONFIG_MAX_MSE     19
+	struct lpfc_sli_config_mse	mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+	uint32_t padding;
+	uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT	0
+#define lpfc_emb0_subcmnd_opcode_MASK	0xff
+#define lpfc_emb0_subcmnd_opcode_WORD	word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT	8
+#define lpfc_emb0_subcmnd_subsys_MASK	0xff
+#define lpfc_emb0_subcmnd_subsys_WORD	word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE		0x0C
+#define FCOE_OPCODE_READ_FCF		0x08
+#define FCOE_OPCODE_ADD_FCF		0x09
+#define FCOE_OPCODE_SET_DPORT_MODE	0x27
+#define FCOE_OPCODE_GET_DPORT_RESULTS	0x28
+};
+
+struct lpfc_sli_config_emb1_subsys {
+	struct lpfc_sli_config_hdr	sli_config_hdr;
+	uint32_t word6;
+#define lpfc_emb1_subcmnd_opcode_SHIFT	0
+#define lpfc_emb1_subcmnd_opcode_MASK	0xff
+#define lpfc_emb1_subcmnd_opcode_WORD	word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT	8
+#define lpfc_emb1_subcmnd_subsys_MASK	0xff
+#define lpfc_emb1_subcmnd_subsys_WORD	word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN		0x01
+#define COMN_OPCODE_GET_PROFILE_CONFIG	0xA4
+#define COMN_OPCODE_READ_OBJECT		0xAB
+#define COMN_OPCODE_WRITE_OBJECT	0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST	0xAD
+#define COMN_OPCODE_DELETE_OBJECT	0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES	0x79
+#define COMN_OPCODE_GET_CNTL_ATTRIBUTES	0x20
+	uint32_t timeout;
+	uint32_t request_length;
+	uint32_t word9;
+#define lpfc_subcmnd_version_SHIFT	0
+#define lpfc_subcmnd_version_MASK	0xff
+#define lpfc_subcmnd_version_WORD	word9
+	uint32_t word10;
+#define lpfc_subcmnd_ask_rd_len_SHIFT	0
+#define lpfc_subcmnd_ask_rd_len_MASK	0xffffff
+#define lpfc_subcmnd_ask_rd_len_WORD	word10
+	uint32_t rd_offset;
+	uint32_t obj_name[26];
+	uint32_t hbd_count;
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD	8
+	struct lpfc_sli_config_hbd	hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+};
+
+struct lpfc_sli_config_mbox {
+	uint32_t word0;
+#define lpfc_mqe_status_SHIFT		16
+#define lpfc_mqe_status_MASK		0x0000FFFF
+#define lpfc_mqe_status_WORD		word0
+#define lpfc_mqe_command_SHIFT		8
+#define lpfc_mqe_command_MASK		0x000000FF
+#define lpfc_mqe_command_WORD		word0
+	union {
+		struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+		struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
+	} un;
+};
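+
+/*
+ * Example (illustrative only): the bsg_ bit-field macros above apply to
+ * these mirrored structures, e.g. to test from user space whether a
+ * completed SLI_CONFIG mailbox was embedded and, if not, to read the
+ * length described by the first MSE:
+ *
+ *	struct lpfc_sli_config_mbox *cfg = (struct lpfc_sli_config_mbox *)buf;
+ *	struct lpfc_sli_config_hdr *hdr =
+ *			&cfg->un.sli_config_emb0_subsys.sli_config_hdr;
+ *
+ *	if (!bsg_bf_get(lpfc_mbox_hdr_emb, hdr)) {
+ *		uint32_t len = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+ *				&cfg->un.sli_config_emb0_subsys.mse[0]);
+ *		...
+ *	}
+ */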
+
+#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED	0
+#define LPFC_FORCED_LINK_SPEED_SUPPORTED	1
+struct get_forced_link_speed_support {
+	uint32_t command;
+};
+struct forced_link_speed_support_reply {
+	uint8_t supported;
+};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED		0
+#define SLI_CONFIG_HANDLED		1
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_compat.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_compat.h
new file mode 100644
index 0000000..6b32b0a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_compat.h
@@ -0,0 +1,98 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/*
+ * This file provides helpers to aid compilation of the driver across
+ * various platform architectures.
+ */
+
+/*******************************************************************
+Note: the HBA's SLI memory contains little-endian longwords (LW).
+Thus, to access it from a little-endian host,
+memcpy_toio() and memcpy_fromio() can be used.
+However, on a big-endian host, copy 4 bytes at a time
+using writel() and readl().
+ *******************************************************************/
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+	uint32_t __iomem *dest32;
+	uint32_t *src32;
+	unsigned int four_bytes;
+
+	dest32 = (uint32_t __iomem *) dest;
+	src32 = (uint32_t *) src;
+
+	/* write input bytes, 4 bytes at a time */
+	for (four_bytes = bytes / 4; four_bytes > 0; four_bytes--) {
+		writel(*src32, dest32);
+		readl(dest32); /* flush */
+		dest32++;
+		src32++;
+	}
+}
+
+static inline void
+lpfc_memcpy_from_slim(void *dest, void __iomem *src, unsigned int bytes)
+{
+	uint32_t *dest32;
+	uint32_t __iomem *src32;
+	unsigned int four_bytes;
+
+	dest32 = (uint32_t *) dest;
+	src32 = (uint32_t __iomem *) src;
+
+	/* read input bytes, 4 bytes at a time */
+	for (four_bytes = bytes / 4; four_bytes > 0; four_bytes--) {
+		*dest32 = readl(src32);
+		dest32++;
+		src32++;
+	}
+}
+
+#else
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+	/* convert bytes in argument list to word count for copy function */
+	__iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
+}
+
+static inline void
+lpfc_memcpy_from_slim(void *dest, void __iomem *src, unsigned int bytes)
+{
+	/* actually returns 1 byte past dest */
+	memcpy_fromio(dest, src, bytes);
+}
+
+#endif	/* __BIG_ENDIAN */
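+
+/*
+ * Usage sketch (illustrative; the call site shown is an assumption based
+ * on how the SLI layer typically copies mailbox commands to the
+ * memory-mapped SLIM region):
+ *
+ *	lpfc_memcpy_to_slim(phba->MBslimaddr, mbox, MAILBOX_CMD_SIZE);
+ *
+ * On big-endian hosts this takes the writel()/readl() path above, which
+ * handles the byte ordering of each longword; on little-endian hosts it
+ * collapses to a plain __iowrite32_copy().
+ */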
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_crtn.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_crtn.h
new file mode 100644
index 0000000..7e30073
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_crtn.h
@@ -0,0 +1,568 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+typedef int (*node_filter)(struct lpfc_nodelist *, void *);
+
+struct fc_rport;
+struct fc_frame_header;
+struct lpfc_nvmet_rcv_ctx;
+void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli_read_link_ste(struct lpfc_hba *);
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
+void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+		 LPFC_MBOXQ_t *, uint16_t);
+void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+			struct lpfc_nodelist *);
+void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_supported_pages(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
+int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+			   uint16_t, uint16_t, bool);
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
+void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
+void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
+void lpfc_cleanup_rpis(struct lpfc_vport *, int);
+void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
+int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
+void lpfc_port_link_failure(struct lpfc_vport *);
+void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
+void lpfc_retry_pport_discovery(struct lpfc_hba *);
+void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
+int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+			struct lpfc_queue *drq, int count, int idx);
+
+void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
+					struct lpfc_nodelist *, int);
+void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_set_disctmo(struct lpfc_vport *);
+int  lpfc_can_disctmo(struct lpfc_vport *);
+int  lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_hba_rpis(struct lpfc_hba *);
+void lpfc_unreg_default_rpis(struct lpfc_vport *);
+void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
+
+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
+			struct lpfc_iocbq *, struct lpfc_nodelist *);
+struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
+struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
+int  lpfc_nlp_put(struct lpfc_nodelist *);
+int  lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_vport *);
+void lpfc_disc_start(struct lpfc_vport *);
+void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
+void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_disc_timeout(unsigned long);
+
+int lpfc_unregister_fcf_prep(struct lpfc_hba *);
+struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+void lpfc_worker_wake_up(struct lpfc_hba *);
+int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+int lpfc_do_work(void *);
+int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
+			    uint32_t);
+
+void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
+int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
+		     struct serv_parm *, uint32_t, int);
+void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_more_plogi(struct lpfc_vport *);
+void lpfc_more_adisc(struct lpfc_vport *);
+void lpfc_end_rscn(struct lpfc_vport *);
+int lpfc_els_chk_latt(struct lpfc_vport *);
+int lpfc_els_abort_flogi(struct lpfc_hba *);
+int lpfc_initial_flogi(struct lpfc_vport *);
+void lpfc_issue_init_vfi(struct lpfc_vport *);
+int lpfc_initial_fdisc(struct lpfc_vport *);
+int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
+int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+		     struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+			struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
+			   struct lpfc_nodelist *);
+int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
+			  struct lpfc_nodelist *);
+void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			  struct lpfc_iocbq *);
+int lpfc_els_handle_rscn(struct lpfc_vport *);
+void lpfc_els_flush_rscn(struct lpfc_vport *);
+int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
+void lpfc_els_flush_all_cmd(struct lpfc_hba *);
+void lpfc_els_flush_cmd(struct lpfc_vport *);
+int lpfc_els_disc_adisc(struct lpfc_vport *);
+int lpfc_els_disc_plogi(struct lpfc_vport *);
+void lpfc_els_timeout(unsigned long);
+void lpfc_els_timeout_handler(struct lpfc_vport *);
+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
+				      uint8_t, struct lpfc_nodelist *,
+				      uint32_t, uint32_t);
+void lpfc_hb_timeout_handler(struct lpfc_hba *);
+
+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			 struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+int lpfc_issue_gidft(struct lpfc_vport *vport);
+int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
+int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
+int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
+void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
+
+int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
+int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_hba_down_prep(struct lpfc_hba *);
+int lpfc_hba_down_post(struct lpfc_hba *);
+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
+int lpfc_online(struct lpfc_hba *);
+void lpfc_unblock_mgmt_io(struct lpfc_hba *);
+void lpfc_offline_prep(struct lpfc_hba *, int);
+void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
+int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
+			spinlock_t *slock);
+
+int lpfc_fof_queue_create(struct lpfc_hba *);
+int lpfc_fof_queue_setup(struct lpfc_hba *);
+int lpfc_fof_queue_destroy(struct lpfc_hba *);
+irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
+
+int lpfc_sli_setup(struct lpfc_hba *);
+int lpfc_sli4_setup(struct lpfc_hba *phba);
+void lpfc_sli_queue_init(struct lpfc_hba *phba);
+void lpfc_sli4_queue_init(struct lpfc_hba *phba);
+struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba,
+					  struct lpfc_iocbq *iocbq);
+
+void lpfc_handle_eratt(struct lpfc_hba *);
+void lpfc_handle_latt(struct lpfc_hba *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+
+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
+int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
+void lpfc_issue_init_vpi(struct lpfc_vport *);
+
+void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
+	uint32_t, LPFC_MBOXQ_t *);
+struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
+void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
+void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+			    struct lpfc_nvmet_ctxbuf *ctxp);
+int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
+			       struct fc_frame_header *fc_hdr);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+		     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
+int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
+void lpfc_unregister_fcf(struct lpfc_hba *);
+void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
+int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
+void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
+void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
+
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
+int lpfc_nvmet_mem_alloc(struct lpfc_hba *phba);
+int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);
+void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
+void lpfc_stop_vport_timers(struct lpfc_vport *);
+
+void lpfc_poll_timeout(unsigned long ptr);
+void lpfc_poll_start_timer(struct lpfc_hba *);
+void lpfc_poll_eratt(unsigned long);
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
+			struct lpfc_sli_ring *, uint32_t);
+
+struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *);
+struct lpfc_iocbq *lpfc_sli_get_iocbq(struct lpfc_hba *);
+void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
+void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
+			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_selective_reset(struct lpfc_hba *);
+void lpfc_reset_barrier(struct lpfc_hba *);
+int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
+int lpfc_sli_brdkill(struct lpfc_hba *);
+int lpfc_sli_chipset_init(struct lpfc_hba *phba);
+int lpfc_sli_brdreset(struct lpfc_hba *);
+int lpfc_sli_brdrestart(struct lpfc_hba *);
+int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_config_port(struct lpfc_hba *, int);
+int lpfc_sli_host_down(struct lpfc_vport *);
+int lpfc_sli_hba_down(struct lpfc_hba *);
+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int);
+int lpfc_sli_check_eratt(struct lpfc_hba *);
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+				    struct lpfc_sli_ring *, uint32_t);
+void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+			     struct fc_frame_header *fc_hdr, bool aborted);
+void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
+			struct lpfc_iocbq *, uint32_t);
+int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
+			struct lpfc_iocbq *iocbq);
+struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
+struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
+					    struct lpfc_iocbq *piocbq);
+void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+			     struct lpfc_dmabuf *);
+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
+					     struct lpfc_sli_ring *,
+					     dma_addr_t);
+
+uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *);
+struct lpfc_dmabuf *lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *,
+			struct lpfc_sli_ring *, uint32_t);
+
+int lpfc_sli_hbq_count(void);
+int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
+void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
+int lpfc_sli_hbq_size(void);
+int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
+			       struct lpfc_iocbq *);
+int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
+			uint64_t, lpfc_ctx_cmd);
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+			uint16_t, uint64_t, lpfc_ctx_cmd);
+
+void lpfc_mbox_timeout(unsigned long);
+void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
+struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
+					 struct lpfc_name *);
+
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
+			     struct lpfc_iocbq *, struct lpfc_iocbq *,
+			     uint32_t);
+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+			     struct lpfc_iocbq *);
+
+void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
+
+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void *lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags,
+			dma_addr_t *handle);
+void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
+
+void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
+void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+
+/* Function prototypes. */
+const char *lpfc_info(struct Scsi_Host *);
+int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
+
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
+void lpfc_get_cfgparam(struct lpfc_hba *);
+void lpfc_get_vport_cfgparam(struct lpfc_vport *);
+int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
+void lpfc_free_sysfs_attr(struct lpfc_vport *);
+extern struct device_attribute *lpfc_hba_attrs[];
+extern struct device_attribute *lpfc_vport_attrs[];
+extern struct scsi_host_template lpfc_template;
+extern struct scsi_host_template lpfc_template_no_hr;
+extern struct scsi_host_template lpfc_template_nvme;
+extern struct scsi_host_template lpfc_vport_template;
+extern struct fc_function_template lpfc_transport_functions;
+extern struct fc_function_template lpfc_vport_transport_functions;
+
+int  lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int  lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
+void lpfc_terminate_rport_io(struct fc_rport *);
+void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
+
+struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
+int  lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
+int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+void destroy_port(struct lpfc_vport *);
+int lpfc_get_instance(void);
+void lpfc_host_attrib_init(struct Scsi_Host *);
+
+extern void lpfc_debugfs_initialize(struct lpfc_vport *);
+extern void lpfc_debugfs_terminate(struct lpfc_vport *);
+extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
+				  uint32_t, uint32_t);
+extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
+				       uint32_t, uint32_t);
+extern void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt,
+				uint16_t data1, uint16_t data2, uint32_t data3);
+extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
+				   uint16_t *, uint16_t *);
+int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
+					  uint16_t *, uint16_t *);
+
+/* externs BlockGuard */
+extern char *_dump_buf_data;
+extern unsigned long _dump_buf_data_order;
+extern char *_dump_buf_dif;
+extern unsigned long _dump_buf_dif_order;
+extern spinlock_t _dump_buf_lock;
+extern int _dump_buf_done;
+extern spinlock_t pgcnt_lock;
+extern unsigned int pgcnt;
+extern unsigned int lpfc_fcp_look_ahead;
+
+/* Interface exported by fabric iocb scheduler */
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+void lpfc_fabric_abort_hba(struct lpfc_hba *);
+void lpfc_fabric_block_timeout(unsigned long);
+void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
+void lpfc_rampdown_queue_depth(struct lpfc_hba *);
+void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+
+void
+lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+				struct lpfc_iocbq *);
+struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
+void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
+struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
+#define HBA_EVENT_RSCN                   5
+#define HBA_EVENT_LINK_UP                2
+#define HBA_EVENT_LINK_DOWN              3
+
+/* functions to support SGIOv4/bsg interface */
+int lpfc_bsg_request(struct bsg_job *);
+int lpfc_bsg_timeout(struct bsg_job *);
+int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			     struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+	struct lpfc_iocbq *);
+struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
+	struct lpfc_sli_ring *);
+int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
+	struct lpfc_iocbq *, uint32_t);
+uint32_t lpfc_drain_txq(struct lpfc_hba *);
+void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *);
+int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t);
+void lpfc_handle_rrq_active(struct lpfc_hba *);
+int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
+int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
+	uint16_t, uint16_t, uint16_t);
+uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
+	uint32_t);
+void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
+	enum mbox_type, enum dma_type, enum sta_type,
+	struct lpfc_dmabuf *, uint32_t);
+void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
+uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
+int lpfc_sli4_queue_create(struct lpfc_hba *);
+void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+				struct sli4_wcqe_xri_aborted *);
+void lpfc_sli_abts_recover_port(struct lpfc_vport *,
+				struct lpfc_nodelist *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *);
+void lpfc_sli4_node_prep(struct lpfc_hba *);
+int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
+void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
+uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
+int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
+
+struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
+						struct lpfc_name *,
+						struct lpfc_name *,
+						uint64_t, uint32_t,  bool);
+void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
+struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
+					struct list_head *list,
+					struct lpfc_name *,
+					struct lpfc_name *, uint64_t);
+bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			 struct lpfc_name *, uint64_t, uint8_t);
+bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			  struct lpfc_name *, uint64_t, uint8_t);
+bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
+			    struct lpfc_name *, uint64_t *, struct lpfc_name *,
+			    struct lpfc_name *, uint64_t *,
+			    uint32_t *, uint32_t *);
+int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
+void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
+
+/* NVME interfaces. */
+void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp);
+int lpfc_nvme_register_port(struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp);
+int lpfc_nvme_create_localport(struct lpfc_vport *vport);
+void lpfc_nvme_destroy_localport(struct lpfc_vport *vport);
+void lpfc_nvme_update_localport(struct lpfc_vport *vport);
+int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
+int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
+void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
+void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
+			struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
+void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
+				struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
+void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
+				struct lpfc_iocbq *cmdiocb,
+				struct lpfc_wcqe_complete *abts_cmpl);
+extern int lpfc_enable_nvmet_cnt;
+extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_ct.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_ct.c
new file mode 100644
index 0000000..e193fa5
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_ct.c
@@ -0,0 +1,3020 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/*
+ * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* FDMI Port Speed definitions - FC-GS-7 */
+#define HBA_PORTSPEED_1GFC		0x00000001	/* 1G FC */
+#define HBA_PORTSPEED_2GFC		0x00000002	/* 2G FC */
+#define HBA_PORTSPEED_4GFC		0x00000008	/* 4G FC */
+#define HBA_PORTSPEED_10GFC		0x00000004	/* 10G FC */
+#define HBA_PORTSPEED_8GFC		0x00000010	/* 8G FC */
+#define HBA_PORTSPEED_16GFC		0x00000020	/* 16G FC */
+#define HBA_PORTSPEED_32GFC		0x00000040	/* 32G FC */
+#define HBA_PORTSPEED_20GFC		0x00000080	/* 20G FC */
+#define HBA_PORTSPEED_40GFC		0x00000100	/* 40G FC */
+#define HBA_PORTSPEED_128GFC		0x00000200	/* 128G FC */
+#define HBA_PORTSPEED_64GFC		0x00000400	/* 64G FC */
+#define HBA_PORTSPEED_256GFC		0x00000800	/* 256G FC */
+#define HBA_PORTSPEED_UNKNOWN		0x00008000	/* Unknown */
+#define HBA_PORTSPEED_10GE		0x00010000	/* 10G E */
+#define HBA_PORTSPEED_40GE		0x00020000	/* 40G E */
+#define HBA_PORTSPEED_100GE		0x00040000	/* 100G E */
+#define HBA_PORTSPEED_25GE		0x00080000	/* 25G E */
+#define HBA_PORTSPEED_50GE		0x00100000	/* 50G E */
+#define HBA_PORTSPEED_400GE		0x00200000	/* 400G E */
+
+#define FOURBYTES	4
+
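+/*
+ * Illustrative example (not from the original source): an HBA reporting
+ * 8G, 16G and 32G support would advertise the FDMI speed mask
+ * HBA_PORTSPEED_8GFC | HBA_PORTSPEED_16GFC | HBA_PORTSPEED_32GFC,
+ * i.e. 0x00000070.
+ */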
+
+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+
+static void
+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+			  struct lpfc_dmabuf *mp, uint32_t size)
+{
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+				"0146 Ignoring unsolicited CT No HBQ "
+				"status = x%x\n",
+				piocbq->iocb.ulpStatus);
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0145 Ignoring unsolicted CT HBQ Size:%d "
+			"status = x%x\n",
+			size, piocbq->iocb.ulpStatus);
+}
+
+static void
+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+		     struct lpfc_dmabuf *mp, uint32_t size)
+{
+	lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
+}
+
+void
+lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		    struct lpfc_iocbq *piocbq)
+{
+	struct lpfc_dmabuf *mp = NULL;
+	IOCB_t *icmd = &piocbq->iocb;
+	int i;
+	struct lpfc_iocbq *iocbq;
+	dma_addr_t paddr;
+	uint32_t size;
+	struct list_head head;
+	struct lpfc_dmabuf *bdeBuf;
+
+	if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+		return;
+
+	if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+	} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+		   ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+		   IOERR_RCV_BUFFER_WAITING)) {
+		/* Not enough posted buffers; Try posting more buffers */
+		phba->fc_stat.NoRcvBuf++;
+		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+			lpfc_post_buffer(phba, pring, 2);
+		return;
+	}
+
+	/* If there are no BDEs associated with this IOCB,
+	 * there is nothing to do.
+	 */
+	if (icmd->ulpBdeCount == 0)
+		return;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		INIT_LIST_HEAD(&head);
+		list_add_tail(&head, &piocbq->list);
+		list_for_each_entry(iocbq, &head, list) {
+			icmd = &iocbq->iocb;
+			if (icmd->ulpBdeCount == 0)
+				continue;
+			bdeBuf = iocbq->context2;
+			iocbq->context2 = NULL;
+			size  = icmd->un.cont64[0].tus.f.bdeSize;
+			lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
+			lpfc_in_buf_free(phba, bdeBuf);
+			if (icmd->ulpBdeCount == 2) {
+				bdeBuf = iocbq->context3;
+				iocbq->context3 = NULL;
+				size  = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+				lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+						     size);
+				lpfc_in_buf_free(phba, bdeBuf);
+			}
+		}
+		list_del(&head);
+	} else {
+		INIT_LIST_HEAD(&head);
+		list_add_tail(&head, &piocbq->list);
+		list_for_each_entry(iocbq, &head, list) {
+			icmd = &iocbq->iocb;
+			if (icmd->ulpBdeCount == 0)
+				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
+			for (i = 0; i < icmd->ulpBdeCount; i++) {
+				paddr = getPaddr(icmd->un.cont64[i].addrHigh,
+						 icmd->un.cont64[i].addrLow);
+				mp = lpfc_sli_ringpostbuf_get(phba, pring,
+							      paddr);
+				size = icmd->un.cont64[i].tus.f.bdeSize;
+				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
+				lpfc_in_buf_free(phba, mp);
+			}
+			lpfc_post_buffer(phba, pring, i);
+		}
+		list_del(&head);
+	}
+}
+
+/**
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function serves as the upper level protocol abort handler for CT
+ * protocol.
+ *
+ * Return 1 if abort has been handled, 0 otherwise.
+ **/
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+	int handled;
+
+	/* CT upper level goes through BSG */
+	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
+
+	return handled;
+}
+
+static void
+lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+	struct lpfc_dmabuf *mlast, *next_mlast;
+
+	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
+		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+		list_del(&mlast->list);
+		kfree(mlast);
+	}
+	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+	kfree(mlist);
+	return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
+		  uint32_t size, int *entries)
+{
+	struct lpfc_dmabuf *mlist = NULL;
+	struct lpfc_dmabuf *mp;
+	int cnt, i = 0;
+
+	/* We get chunks of FCELSSIZE */
+	cnt = size > FCELSSIZE ? FCELSSIZE : size;
+
+	while (size) {
+		/* Allocate buffer for rsp payload */
+		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!mp) {
+			if (mlist)
+				lpfc_free_ct_rsp(phba, mlist);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&mp->list);
+
+		if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
+		    cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
+			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+		else
+			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+
+		if (!mp->virt) {
+			kfree(mp);
+			if (mlist)
+				lpfc_free_ct_rsp(phba, mlist);
+			return NULL;
+		}
+
+		/* Queue it to a linked list */
+		if (!mlist)
+			mlist = mp;
+		else
+			list_add_tail(&mp->list, &mlist->list);
+
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		/* build buffer ptr list for IOCB */
+		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+		bpl->tus.f.bdeSize = (uint16_t) cnt;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+
+		i++;
+		size -= cnt;
+	}
+
+	*entries = i;
+	return mlist;
+}
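+
+/*
+ * Sketch of the buffer math above (illustrative, following the callers'
+ * usage in this file): a response area of rsp_size bytes is covered by
+ * rsp_size / FCELSSIZE DMA buffers when rsp_size is a multiple of the
+ * chunk size, one BPL entry per buffer; *entries returns that count so
+ * lpfc_ct_cmd() can size the IOCB's BDE list (command BDE + responses).
+ */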
+
+int
+lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
+{
+	struct lpfc_dmabuf *buf_ptr;
+
+	if (ctiocb->context_un.ndlp) {
+		lpfc_nlp_put(ctiocb->context_un.ndlp);
+		ctiocb->context_un.ndlp = NULL;
+	}
+	if (ctiocb->context1) {
+		buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+		ctiocb->context1 = NULL;
+	}
+	if (ctiocb->context2) {
+		lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
+		ctiocb->context2 = NULL;
+	}
+
+	if (ctiocb->context3) {
+		buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+		ctiocb->context3 = NULL;
+	}
+	lpfc_sli_release_iocbq(phba, ctiocb);
+	return 0;
+}
+
+/**
+ * lpfc_gen_req - Build and issue a GEN_REQUEST command  to the SLI Layer
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @bmp: Pointer to BPL for SLI command
+ * @inp: Pointer to data buffer for response data.
+ * @outp: Pointer to data buffer that hold the CT command.
+ * @cmpl: completion routine to call when command completes
+ * @ndlp: Destination NPort nodelist entry
+ *
+ * This function performs the final part of issuing a CT command.
+ */
+static int
+lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
+	     void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+		     struct lpfc_iocbq *),
+	     struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+	     uint32_t tmo, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *geniocb;
+	int rc;
+
+	/* Allocate buffer for  command iocb */
+	geniocb = lpfc_sli_get_iocbq(phba);
+
+	if (geniocb == NULL)
+		return 1;
+
+	icmd = &geniocb->iocb;
+	icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+
+	if (usr_flg)
+		geniocb->context3 = NULL;
+	else
+		geniocb->context3 = (uint8_t *) bmp;
+
+	/* Save for completion so we can release these resources */
+	geniocb->context1 = (uint8_t *) inp;
+	geniocb->context2 = (uint8_t *) outp;
+	geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
+
+	/* Fill in payload, bp points to frame payload */
+	icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+
+	/* Fill in rest of iocb */
+	icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	if (!tmo) {
+		/* FC spec states we need 3 * ratov for CT requests */
+		tmo = (3 * phba->fc_ratov);
+	}
+	icmd->ulpTimeout = tmo;
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	icmd->ulpContext = ndlp->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+		/* For GEN_REQUEST64_CR, use the RPI */
+		icmd->ulpCt_h = 0;
+		icmd->ulpCt_l = 0;
+	}
+
+	/* Issue GEN REQ IOCB for NPORT <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0119 Issue GEN REQ IOCB to NPORT x%x "
+			 "Data: x%x x%x\n",
+			 ndlp->nlp_DID, icmd->ulpIoTag,
+			 vport->port_state);
+	geniocb->iocb_cmpl = cmpl;
+	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+	geniocb->vport = vport;
+	geniocb->retry = retry;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		lpfc_sli_release_iocbq(phba, geniocb);
+		return 1;
+	}
+
+	return 0;
+}
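+
+/*
+ * Illustrative timing (assumed value): with the FC-FS default R_A_TOV of
+ * 10 seconds, a caller passing tmo == 0 gets ulpTimeout = 3 * 10 = 30
+ * seconds from the rule above, and the driver-side timeout becomes
+ * 30 + LPFC_DRVR_TIMEOUT.
+ */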
+
+/**
+ * lpfc_ct_cmd - Build and issue a CT command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @inmp: Pointer to data buffer for response data.
+ * @bmp: Pointer to BPL for SLI command
+ * @ndlp: Destination NPort nodelist entry
+ * @cmpl: completion routine to call when command completes
+ *
+ * This function is called for issuing a CT command.
+ */
+static int
+lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
+	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
+	    void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+			  struct lpfc_iocbq *),
+	    uint32_t rsp_size, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
+	struct lpfc_dmabuf *outmp;
+	int cnt = 0, status;
+	int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
+		CommandResponse.bits.CmdRsp;
+
+	bpl++;			/* Skip past ct request */
+
+	/* Put buffer(s) for ct rsp in bpl */
+	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
+	if (!outmp)
+		return -ENOMEM;
+	/*
+	 * Form the CT IOCB.  The total number of BDEs in this IOCB
+	 * is the single command plus response count from
+	 * lpfc_alloc_ct_rsp.
+	 */
+	cnt += 1;
+	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
+			      cnt, 0, retry);
+	if (status) {
+		lpfc_free_ct_rsp(phba, outmp);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+struct lpfc_vport *
+lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did)
+{
+	struct lpfc_vport *vport_curr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
+		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return vport_curr;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return NULL;
+}
+
+static void
+lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+{
+	struct lpfc_nodelist *ndlp;
+
+	if ((vport->port_type != LPFC_NPIV_PORT) ||
+	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
+
+		ndlp = lpfc_setup_disc_node(vport, Did);
+
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"Parse GID_FTrsp: did:x%x flg:x%x x%x",
+				Did, ndlp->nlp_flag, vport->fc_flag);
+
+			ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
+			/* By default, the driver expects to support FCP FC4 */
+			if (fc4_type == FC_TYPE_FCP)
+				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
+			if (fc4_type == FC_TYPE_NVME)
+				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0238 Process x%06x NameServer Rsp "
+					 "Data: x%x x%x x%x x%x\n", Did,
+					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
+					 vport->fc_flag,
+					 vport->fc_rscn_id_cnt);
+		} else {
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0239 Skip x%06x NameServer Rsp "
+					 "Data: x%x x%x\n", Did,
+					 vport->fc_flag,
+					 vport->fc_rscn_id_cnt);
+		}
+	} else {
+		if (!(vport->fc_flag & FC_RSCN_MODE) ||
+		    lpfc_rscn_payload_check(vport, Did)) {
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
+				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+			/*
+			 * This NPortID was previously a FCP/NVMe target,
+			 * Don't even bother to send GFF_ID.
+			 */
+			ndlp = lpfc_findnode_did(vport, Did);
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+			    (ndlp->nlp_type &
+			    (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
+				if (fc4_type == FC_TYPE_FCP)
+					ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+				if (fc4_type == FC_TYPE_NVME)
+					ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+				lpfc_setup_disc_node(vport, Did);
+			} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+				   0, Did) == 0)
+				vport->num_disc_nodes++;
+			else
+				lpfc_setup_disc_node(vport, Did);
+		} else {
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0245 Skip x%06x NameServer Rsp "
+					 "Data: x%x x%x\n", Did,
+					 vport->fc_flag,
+					 vport->fc_rscn_id_cnt);
+		}
+	}
+}
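+
+/*
+ * Gating summary: ports that are not NPIV, have no RFF_ID registration,
+ * or run without cfg_restrict_login take the direct path and build the
+ * discovery node immediately; restricted NPIV ports first probe unknown
+ * DIDs with GFF_ID (skipping nodes already known as FCP/NVMe targets)
+ * before committing an RPI to the node.
+ */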
+
+static void
+lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/*
+	 * To conserve rpi's, filter out addresses for other
+	 * vports on the same physical HBAs.
+	 */
+	if (Did != vport->fc_myDID &&
+	    (!lpfc_find_vport_by_did(phba, Did) ||
+	     vport->cfg_peer_port_login)) {
+		if (!phba->nvmet_support) {
+			/* FCPI/NVMEI path. Process Did */
+			lpfc_prep_node_fc4type(vport, Did, fc4_type);
+			return;
+		}
+		/* NVMET path.  NVMET only cares about NVMEI nodes. */
+		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+			if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
+			    ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+				continue;
+			spin_lock_irq(shost->host_lock);
+			if (ndlp->nlp_DID == Did)
+				ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
+			else
+				ndlp->nlp_flag |= NLP_NVMET_RECOV;
+			spin_unlock_irq(shost->host_lock);
+		}
+	}
+}
+
+static int
+lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
+	    uint32_t Size)
+{
+	struct lpfc_sli_ct_request *Response =
+		(struct lpfc_sli_ct_request *) mp->virt;
+	struct lpfc_dmabuf *mlast, *next_mp;
+	uint32_t *ctptr = (uint32_t *)&Response->un.gid.PortType;
+	uint32_t Did, CTentry;
+	int Cnt;
+	struct list_head head;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp = NULL;
+
+	lpfc_set_disctmo(vport);
+	vport->num_disc_nodes = 0;
+	vport->fc_ns_retry = 0;
+
+	list_add_tail(&head, &mp->list);
+	list_for_each_entry_safe(mp, next_mp, &head, list) {
+		mlast = mp;
+
+		Cnt = Size  > FCELSSIZE ? FCELSSIZE : Size;
+
+		Size -= Cnt;
+
+		if (!ctptr) {
+			ctptr = (uint32_t *) mlast->virt;
+		} else
+			Cnt -= 16;	/* subtract length of CT header */
+
+		/* Loop through entire NameServer list of DIDs */
+		while (Cnt >= sizeof(uint32_t)) {
+			/* Get next DID from NameServer List */
+			CTentry = *ctptr++;
+			Did = ((be32_to_cpu(CTentry)) & Mask_DID);
+			lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
+			if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
+				goto nsout1;
+
+			Cnt -= sizeof(uint32_t);
+		}
+		ctptr = NULL;
+
+	}
+
+	/* All GID_FT entries processed.  If the driver is running
+	 * in target mode, put impacted nodes into recovery and drop
+	 * the RPI to flush outstanding IO.
+	 */
+	if (vport->phba->nvmet_support) {
+		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+			if (!(ndlp->nlp_flag & NLP_NVMET_RECOV))
+				continue;
+			lpfc_disc_state_machine(vport, ndlp, NULL,
+						NLP_EVT_DEVICE_RECOVERY);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
+			spin_unlock_irq(shost->host_lock);
+		}
+	}
+
+nsout1:
+	list_del(&head);
+	return 0;
+}
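+
+/*
+ * Illustrative GID_FT entry layout (per the FC-GS encoding assumed by
+ * the parser above): each accept entry is one big-endian word, low 24
+ * bits the port DID, top control bit the last-entry flag.  A raw entry
+ * of 0x80651234 would therefore yield Did = 0x651234 and terminate the
+ * scan via SLI_CT_LAST_ENTRY.
+ */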
+
+static void
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_dmabuf *outp;
+	struct lpfc_dmabuf *inp;
+	struct lpfc_sli_ct_request *CTrsp;
+	struct lpfc_sli_ct_request *CTreq;
+	struct lpfc_nodelist *ndlp;
+	int rc, type;
+
+	/* First save ndlp, before we overwrite it */
+	ndlp = cmdiocb->context_un.ndlp;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+	inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+	outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+	irsp = &rspiocb->iocb;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		 "GID_FT cmpl:     status:x%x/x%x rtry:%d",
+		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+
+	/* Don't bother processing response if vport is being torn down. */
+	if (vport->load_flag & FC_UNLOADING) {
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		goto out;
+	}
+
+	if (lpfc_els_chk_latt(vport)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0216 Link event during NS query\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		goto out;
+	}
+	if (lpfc_error_lost_link(irsp)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0226 NS query failed due to link event\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		goto out;
+	}
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+			if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+			    (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			    IOERR_NO_RESOURCES)
+				vport->fc_ns_retry++;
+
+			type = lpfc_get_gidft_type(vport, cmdiocb);
+			if (type == 0)
+				goto out;
+
+			/* CT command is being retried */
+			vport->gidft_inp--;
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+					 vport->fc_ns_retry, type);
+			if (rc == 0)
+				goto out;
+		}
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0257 GID_FT Query error: 0x%x 0x%x\n",
+				 irsp->ulpStatus, vport->fc_ns_retry);
+	} else {
+		/* Good status, continue checking */
+		CTreq = (struct lpfc_sli_ct_request *) inp->virt;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0208 NameServer Rsp Data: x%x x%x\n",
+					 vport->fc_flag,
+					 CTreq->un.gid.Fc4Type);
+
+			lpfc_ns_rsp(vport,
+				    outp,
+				    CTreq->un.gid.Fc4Type,
+				    (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+		} else if (CTrsp->CommandResponse.bits.CmdRsp ==
+			   be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+			/* NameServer Rsp Error */
+			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
+			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0269 No NameServer Entries "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t) CTrsp->ReasonCode,
+					(uint32_t) CTrsp->Explanation,
+					vport->fc_flag);
+
+				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"GID_FT no entry  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t) CTrsp->ReasonCode,
+				(uint32_t) CTrsp->Explanation);
+			} else {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0240 NameServer Rsp Error "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t) CTrsp->ReasonCode,
+					(uint32_t) CTrsp->Explanation,
+					vport->fc_flag);
+
+				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"GID_FT rsp err1  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t) CTrsp->ReasonCode,
+				(uint32_t) CTrsp->Explanation);
+			}
+		} else {
+			/* NameServer Rsp Error */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					"0241 NameServer Rsp Error "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t) CTrsp->ReasonCode,
+					(uint32_t) CTrsp->Explanation,
+					vport->fc_flag);
+
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"GID_FT rsp err2  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t) CTrsp->ReasonCode,
+				(uint32_t) CTrsp->Explanation);
+		}
+		vport->gidft_inp--;
+	}
+	/* Link up / RSCN discovery */
+	if ((vport->num_disc_nodes == 0) &&
+	    (vport->gidft_inp == 0)) {
+		/*
+		 * The driver has cycled through all Nports in the RSCN payload.
+		 * Complete the handling by cleaning up and marking the
+		 * current driver state.
+		 */
+		if (vport->port_state >= LPFC_DISC_AUTH) {
+			if (vport->fc_flag & FC_RSCN_MODE) {
+				lpfc_els_flush_rscn(vport);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+				spin_unlock_irq(shost->host_lock);
+			} else
+				lpfc_els_flush_rscn(vport);
+		}
+
+		lpfc_disc_start(vport);
+	}
+out:
+	cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+	struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+	struct lpfc_sli_ct_request *CTrsp;
+	int did, rc, retry;
+	uint8_t fbits;
+	struct lpfc_nodelist *ndlp;
+
+	did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
+	did = be32_to_cpu(did);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		"GFF_ID cmpl:     status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		/* Good status, continue checking */
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
+
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+			if ((fbits & FC4_FEATURE_INIT) &&
+			    !(fbits & FC4_FEATURE_TARGET)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+						 LOG_DISCOVERY,
+						 "0270 Skip x%x GFF "
+						 "NameServer Rsp Data: (init) "
+						 "x%x x%x\n", did, fbits,
+						 vport->fc_rscn_id_cnt);
+				goto out;
+			}
+		}
+	} else {
+		/* Check for retry */
+		if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
+			retry = 1;
+			if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch ((irsp->un.ulpWord[4] &
+					IOERR_PARAM_MASK)) {
+
+				case IOERR_NO_RESOURCES:
+					/* We don't increment the retry
+					 * count for this case.
+					 */
+					break;
+				case IOERR_LINK_DOWN:
+				case IOERR_SLI_ABORTED:
+				case IOERR_SLI_DOWN:
+					retry = 0;
+					break;
+				default:
+					cmdiocb->retry++;
+				}
+			} else
+				cmdiocb->retry++;
+
+			if (retry) {
+				/* CT command is being retried */
+				rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+					 cmdiocb->retry, did);
+				if (rc == 0) {
+					/* success */
+					lpfc_ct_free_iocb(phba, cmdiocb);
+					return;
+				}
+			}
+		}
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0267 NameServer GFF Rsp "
+				 "x%x Error (%d %d) Data: x%x x%x\n",
+				 did, irsp->ulpStatus, irsp->un.ulpWord[4],
+				 vport->fc_flag, vport->fc_rscn_id_cnt);
+	}
+
+	/* This is a target port, unregistered port, or the GFF_ID failed */
+	ndlp = lpfc_setup_disc_node(vport, did);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0242 Process x%x GFF "
+				 "NameServer Rsp Data: x%x x%x x%x\n",
+				 did, ndlp->nlp_flag, vport->fc_flag,
+				 vport->fc_rscn_id_cnt);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0243 Skip x%x GFF "
+				 "NameServer Rsp Data: x%x x%x\n", did,
+				 vport->fc_flag, vport->fc_rscn_id_cnt);
+	}
+out:
+	/* Link up / RSCN discovery */
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
+	if (vport->num_disc_nodes == 0) {
+		/*
+		 * The driver has cycled through all Nports in the RSCN payload.
+		 * Complete the handling by cleaning up and marking the
+		 * current driver state.
+		 */
+		if (vport->port_state >= LPFC_DISC_AUTH) {
+			if (vport->fc_flag & FC_RSCN_MODE) {
+				lpfc_els_flush_rscn(vport);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+				spin_unlock_irq(shost->host_lock);
+			} else
+				lpfc_els_flush_rscn(vport);
+		}
+		lpfc_disc_start(vport);
+	}
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+				struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
+	struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+	struct lpfc_sli_ct_request *CTrsp;
+	int did;
+	struct lpfc_nodelist *ndlp;
+	uint32_t fc4_data_0, fc4_data_1;
+
+	did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
+	did = be32_to_cpu(did);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+			      "GFT_ID cmpl: status:x%x/x%x did:x%x",
+			      irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		/* Good status, continue checking */
+		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
+		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
+		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
+				 did, fc4_data_0, fc4_data_1);
+
+		ndlp = lpfc_findnode_did(vport, did);
+		if (ndlp) {
+			/* The bitmask value for FCP and NVME FCP types is
+			 * the same because they are 32 bits distant from
+			 * each other in word0 and word1.
+			 */
+			if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
+				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
+				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "3064 Setting ndlp %p, DID x%06x with "
+					 "FC4 x%08x, Data: x%08x x%08x\n",
+					 ndlp, did, ndlp->nlp_fc4_type,
+					 FC_TYPE_FCP, FC_TYPE_NVME);
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+			lpfc_issue_els_prli(vport, ndlp, 0);
+		}
+	} else
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+
+	lpfc_ct_free_iocb(phba, cmdiocb);
+}
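+
+/*
+ * Illustrative GFT_ID decoding (not from the original source): the accept
+ * payload is a 256-bit FC-4 type bitmap, 32 types per word with bit
+ * position = TYPE mod 32.  TYPE 0x08 (FCP) lands on bit 8 of word 0 and
+ * TYPE 0x28 (NVMe) on bit 8 of word 1, which is why the handler above
+ * tests the same LPFC_FC4_TYPE_BITMASK against both fc4_data_0 and
+ * fc4_data_1.
+ */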
+
+static void
+lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+	     struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_dmabuf *inp;
+	struct lpfc_dmabuf *outp;
+	IOCB_t *irsp;
+	struct lpfc_sli_ct_request *CTrsp;
+	struct lpfc_nodelist *ndlp;
+	int cmdcode, rc;
+	uint8_t retry;
+	uint32_t latt;
+
+	/* First save ndlp, before we overwrite it */
+	ndlp = cmdiocb->context_un.ndlp;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+	outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+	irsp = &rspiocb->iocb;
+
+	cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
+					CommandResponse.bits.CmdRsp);
+	CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+
+	latt = lpfc_els_chk_latt(vport);
+
+	/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0209 CT Request completes, latt %d, "
+			 "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+			 latt, irsp->ulpStatus,
+			 CTrsp->CommandResponse.bits.CmdRsp,
+			 cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		"CT cmd cmpl:     status:x%x/x%x cmd:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+
+	if (irsp->ulpStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0268 NS cmd x%x Error (x%x x%x)\n",
+				 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			  IOERR_SLI_DOWN) ||
+			 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			  IOERR_SLI_ABORTED)))
+			goto out;
+
+		retry = cmdiocb->retry;
+		if (retry >= LPFC_MAX_NS_RETRY)
+			goto out;
+
+		retry++;
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0250 Retrying NS cmd %x\n", cmdcode);
+		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
+		if (rc == 0)
+			goto out;
+	}
+
+out:
+	cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RFT_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RNN_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			 struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RSPN_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			 struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RSNN_NN;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	/* Even if it fails, we will act as though it succeeded. */
+	vport->ct_flags = 0;
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RFF_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+/*
+ * Although the symbolic port name is thought to be an integer
+ * as of January 18, 2016, leave it as a string until more of
+ * the record state becomes defined.
+ */
+int
+lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
+	size_t size)
+{
+	int n;
+
+	/*
+	 * Use the lpfc board number as the Symbolic Port
+	 * Name object.  NPIV is not in play so this integer
+	 * value is sufficient and unique per FC-ID.
+	 */
+	n = snprintf(symbol, size, "%d", vport->phba->brd_no);
+	return n;
+}
+
+int
+lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+	size_t size)
+{
+	char fwrev[FW_REV_STR_SIZE];
+	int n;
+
+	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+
+	n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
+	if (size < n)
+		return n;
+
+	n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+	if (size < n)
+		return n;
+
+	n += snprintf(symbol + n, size - n, " DV%s.",
+		      lpfc_release_version);
+	if (size < n)
+		return n;
+
+	n += snprintf(symbol + n, size - n, " HN:%s.",
+		      init_utsname()->nodename);
+	if (size < n)
+		return n;
+
+	/* Note: OS name is "Linux" */
+	n += snprintf(symbol + n, size - n, " OS:%s\n",
+		      init_utsname()->sysname);
+	return n;
+}
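+
+/*
+ * Illustrative result (hypothetical model and revision strings): the
+ * node name built above would read something like
+ *   "Emulex LPe32002 FV12.0.261.9 DV11.4.0.0. HN:host42. OS:Linux"
+ */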
+
+static uint32_t
+lpfc_find_map_node(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct Scsi_Host  *shost;
+	uint32_t cnt = 0;
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (ndlp->nlp_type & NLP_FABRIC)
+			continue;
+		if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
+		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
+			cnt++;
+	}
+	spin_unlock_irq(shost->host_lock);
+	return cnt;
+}
+
+/*
+ * This routine will return the FC4 Type associated with the CT
+ * GID_FT command.
+ */
+int
+lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_sli_ct_request *CtReq;
+	struct lpfc_dmabuf *mp;
+	uint32_t type;
+
+	mp = cmdiocb->context1;
+	if (mp == NULL)
+		return 0;
+	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+	type = (uint32_t)CtReq->un.gid.Fc4Type;
+	if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
+		return 0;
+	return type;
+}
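+
+/*
+ * For reference (an assumption consistent with the FC_TYPE_FCP and
+ * FC_TYPE_NVME usage elsewhere in this file): SLI_CTPT_FCP corresponds
+ * to FC-4 TYPE 0x08 and SLI_CTPT_NVME to TYPE 0x28, so the value
+ * returned here can be fed straight back into lpfc_ns_cmd() as the
+ * GID_FT context.
+ */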
+
+/*
+ * lpfc_ns_cmd
+ * Description:
+ *    Issue a CT command to the fabric NameServer, e.g.
+ *       SLI_CTNS_GID_FT
+ *       SLI_CTNS_RFT_ID
+ */
+int
+lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
+	    uint8_t retry, uint32_t context)
+{
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *mp, *bmp;
+	struct lpfc_sli_ct_request *CtReq;
+	struct ulp_bde64 *bpl;
+	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+		      struct lpfc_iocbq *) = NULL;
+	uint32_t rsp_size = 1024;
+	size_t   size;
+	int rc = 0;
+
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
+	    || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+		rc = 1;
+		goto ns_cmd_exit;
+	}
+
+	/* fill in BDEs for command */
+	/* Allocate buffer for command payload */
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		rc = 2;
+		goto ns_cmd_exit;
+	}
+
+	INIT_LIST_HEAD(&mp->list);
+	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+	if (!mp->virt) {
+		rc = 3;
+		goto ns_cmd_free_mp;
+	}
+
+	/* Allocate buffer for Buffer ptr list */
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = 4;
+		goto ns_cmd_free_mpvirt;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
+	if (!bmp->virt) {
+		rc = 5;
+		goto ns_cmd_free_bmp;
+	}
+
+	/* NameServer Req */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0236 NameServer Req Data: x%x x%x x%x x%x\n",
+			 cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
+			 context);
+
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	memset(bpl, 0, sizeof(struct ulp_bde64));
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+	bpl->tus.f.bdeFlags = 0;
+	if (cmdcode == SLI_CTNS_GID_FT)
+		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_GFF_ID)
+		bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_GFT_ID)
+		bpl->tus.f.bdeSize = GFT_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RFT_ID)
+		bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RNN_ID)
+		bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RSPN_ID)
+		bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RSNN_NN)
+		bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_DA_ID)
+		bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RFF_ID)
+		bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
+	else
+		bpl->tus.f.bdeSize = 0;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	CtReq->RevisionId.bits.InId = 0;
+	CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
+	CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
+	CtReq->CommandResponse.bits.Size = 0;
+	switch (cmdcode) {
+	case SLI_CTNS_GID_FT:
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_GID_FT);
+		CtReq->un.gid.Fc4Type = context;
+
+		if (vport->port_state < LPFC_NS_QRY)
+			vport->port_state = LPFC_NS_QRY;
+		lpfc_set_disctmo(vport);
+		cmpl = lpfc_cmpl_ct_cmd_gid_ft;
+		rsp_size = FC_MAX_NS_RSP;
+		break;
+
+	case SLI_CTNS_GFF_ID:
+		CtReq->CommandResponse.bits.CmdRsp =
+			cpu_to_be16(SLI_CTNS_GFF_ID);
+		CtReq->un.gff.PortId = cpu_to_be32(context);
+		cmpl = lpfc_cmpl_ct_cmd_gff_id;
+		break;
+
+	case SLI_CTNS_GFT_ID:
+		CtReq->CommandResponse.bits.CmdRsp =
+			cpu_to_be16(SLI_CTNS_GFT_ID);
+		CtReq->un.gft.PortId = cpu_to_be32(context);
+		cmpl = lpfc_cmpl_ct_cmd_gft_id;
+		break;
+
+	case SLI_CTNS_RFT_ID:
+		vport->ct_flags &= ~FC_CT_RFT_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_RFT_ID);
+		CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+
+		/* Register FC4 FCP type if enabled.  */
+		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+			CtReq->un.rft.fcpReg = 1;
+
+		/* Register NVME type if enabled.  Defined LE and swapped.
+		 * rsvd[0] is used as word1 because of the hard-coded
+		 * word0 usage in the ct_request data structure.
+		 */
+		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+			CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100);
+
+		cmpl = lpfc_cmpl_ct_cmd_rft_id;
+		break;
+
+	case SLI_CTNS_RNN_ID:
+		vport->ct_flags &= ~FC_CT_RNN_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_RNN_ID);
+		CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
+		memcpy(CtReq->un.rnn.wwnn,  &vport->fc_nodename,
+		       sizeof(struct lpfc_name));
+		cmpl = lpfc_cmpl_ct_cmd_rnn_id;
+		break;
+
+	case SLI_CTNS_RSPN_ID:
+		vport->ct_flags &= ~FC_CT_RSPN_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_RSPN_ID);
+		CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
+		size = sizeof(CtReq->un.rspn.symbname);
+		CtReq->un.rspn.len =
+			lpfc_vport_symbolic_port_name(vport,
+			CtReq->un.rspn.symbname, size);
+		cmpl = lpfc_cmpl_ct_cmd_rspn_id;
+		break;
+	case SLI_CTNS_RSNN_NN:
+		vport->ct_flags &= ~FC_CT_RSNN_NN;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_RSNN_NN);
+		memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
+		       sizeof(struct lpfc_name));
+		size = sizeof(CtReq->un.rsnn.symbname);
+		CtReq->un.rsnn.len =
+			lpfc_vport_symbolic_node_name(vport,
+			CtReq->un.rsnn.symbname, size);
+		cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
+		break;
+	case SLI_CTNS_DA_ID:
+		/* Implement DA_ID Nameserver request */
+		CtReq->CommandResponse.bits.CmdRsp =
+			cpu_to_be16(SLI_CTNS_DA_ID);
+		CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
+		cmpl = lpfc_cmpl_ct_cmd_da_id;
+		break;
+	case SLI_CTNS_RFF_ID:
+		vport->ct_flags &= ~FC_CT_RFF_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    cpu_to_be16(SLI_CTNS_RFF_ID);
+		CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
+		CtReq->un.rff.fbits = FC4_FEATURE_INIT;
+
+		/* The driver always supports FC_TYPE_FCP.  However, the
+		 * caller can specify NVME (type x28) as well, but only
+		 * if that FC4 type is enabled in the configuration.
+		 */
+		if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
+		    (context == FC_TYPE_NVME)) {
+			if ((vport == phba->pport) && phba->nvmet_support) {
+				CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
+					FC4_FEATURE_NVME_DISC);
+				lpfc_nvmet_update_targetport(phba);
+			} else {
+				lpfc_nvme_update_localport(vport);
+			}
+			CtReq->un.rff.type_code = context;
+
+		} else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
+			   (context == FC_TYPE_FCP))
+			CtReq->un.rff.type_code = context;
+
+		else
+			goto ns_cmd_free_bmpvirt;
+
+		cmpl = lpfc_cmpl_ct_cmd_rff_id;
+		break;
+	}
+	/* The lpfc_ct_cmd/lpfc_gen_req path increments the ndlp reference
+	 * count to hold the ndlp for the corresponding callback function.
+	 */
+	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
+		/* On success, the cmpl function will free the buffers */
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+			"Issue CT cmd:    cmd:x%x did:x%x",
+			cmdcode, ndlp->nlp_DID, 0);
+		return 0;
+	}
+	rc = 6;
+
+	/* Decrement ndlp reference count to release ndlp reference held
+	 * for the failed command's callback function.
+	 */
+	lpfc_nlp_put(ndlp);
+
+ns_cmd_free_bmpvirt:
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ns_cmd_free_bmp:
+	kfree(bmp);
+ns_cmd_free_mpvirt:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ns_cmd_free_mp:
+	kfree(mp);
+ns_cmd_exit:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
+	return 1;
+}
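+
+/*
+ * Example usage (mirroring the retry path in lpfc_cmpl_ct_cmd_gid_ft):
+ * issue a first-attempt NameServer query for all FCP ports:
+ *
+ *	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP))
+ *		... the request was not issued; no completion will run ...
+ */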
+
+/**
+ * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to the command IOCBQ.
+ * @rspiocb: Pointer to the response IOCBQ.
+ *
+ * This function handles the completion of a driver-initiated FDMI
+ * CT command issued during discovery.
+ */
+static void
+lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		       struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_dmabuf *inp = cmdiocb->context1;
+	struct lpfc_dmabuf *outp = cmdiocb->context2;
+	struct lpfc_sli_ct_request *CTcmd = inp->virt;
+	struct lpfc_sli_ct_request *CTrsp = outp->virt;
+	uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+	uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_nodelist *ndlp;
+	uint32_t latt, cmd, err;
+
+	latt = lpfc_els_chk_latt(vport);
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		"FDMI cmpl:       status:x%x/x%x latt:%d",
+		irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+
+	if (latt || irsp->ulpStatus) {
+		/* Look for a retryable error */
+		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+			case IOERR_SLI_ABORTED:
+			case IOERR_ABORT_IN_PROGRESS:
+			case IOERR_SEQUENCE_TIMEOUT:
+			case IOERR_ILLEGAL_FRAME:
+			case IOERR_NO_RESOURCES:
+			case IOERR_ILLEGAL_COMMAND:
+				cmdiocb->retry++;
+				if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY)
+					break;
+
+				/* Retry the same FDMI command */
+				err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING,
+							  cmdiocb, 0);
+				if (err == IOCB_ERROR)
+					break;
+				return;
+			default:
+				break;
+			}
+		}
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0229 FDMI cmd %04x failed, latt = %d "
+				 "ulpStatus: x%x, rid x%x\n",
+				 be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+	}
+	lpfc_ct_free_iocb(phba, cmdiocb);
+
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	/* Check for a CT LS_RJT response */
+	cmd =  be16_to_cpu(fdmi_cmd);
+	if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
+		/* FDMI rsp failed */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
+
+		/* Should we fall back to FDMI-2 / FDMI-1? */
+		switch (cmd) {
+		case SLI_MGMT_RHBA:
+			if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
+				/* Fallback to FDMI-1 */
+				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
+				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
+				/* Start over */
+				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+			}
+			return;
+
+		case SLI_MGMT_RPRT:
+			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
+				/* Fallback to FDMI-1 */
+				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
+				/* Start over */
+				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
+			}
+			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
+				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+				/* Retry the same command */
+				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
+			}
+			return;
+
+		case SLI_MGMT_RPA:
+			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
+				/* Fallback to FDMI-1 */
+				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
+				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
+				/* Start over */
+				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+			}
+			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
+				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+				/* Retry the same command */
+				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
+			}
+			return;
+		}
+	}
+
+	/*
+	 * On success, need to cycle thru FDMI registration for discovery
+	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
+	 * DPRT -> RPRT (vports)
+	 */
+	switch (cmd) {
+	case SLI_MGMT_RHBA:
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
+		break;
+
+	case SLI_MGMT_DHBA:
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+		break;
+
+	case SLI_MGMT_DPRT:
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
+		else
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+		break;
+	}
+	return;
+}
+
+
+/**
+ * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * Called from the heartbeat timeout routine to check if the number of
+ * discovered ports has changed. If so, re-register that port attribute.
+ */
+void
+lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nodelist *ndlp;
+	uint16_t cnt;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	/* Must be connected to a Fabric */
+	if (!(vport->fc_flag & FC_FABRIC))
+		return;
+
+	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
+		return;
+
+	cnt = lpfc_find_map_node(vport);
+	if (cnt == vport->fdmi_num_disc)
+		return;
+
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
+			      LPFC_FDMI_PORT_ATTR_num_disc);
+	} else {
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
+			      LPFC_FDMI_PORT_ATTR_num_disc);
+	}
+}
+
+/* Routines for all individual HBA attributes */
+static int
+lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
+	       sizeof(struct lpfc_name));
+	size = FOURBYTES + sizeof(struct lpfc_name);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_NODENAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
+				struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	/* This string MUST be consistent with other FC platforms
+	 * supported by Broadcom.
+	 */
+	strncpy(ae->un.AttrString,
+		"Emulex Corporation",
+		       sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
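+	/* Round the attribute length up to a 32-bit boundary. When the
+	 * string already fills a whole word, one more word of zero padding
+	 * is added, likely to keep the value NULL terminated on the wire.
+	 * The same idiom is used for every string attribute below.
+	 */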
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, phba->SerialNumber,
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
+			 struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, phba->ModelName,
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_MODEL);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
+			       struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, phba->ModelDesc,
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+				  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
+			   struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	lpfc_vpd_t *vp = &phba->vpd;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t i, j, incr, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	/* Convert the 32-bit JEDEC ID (vp->rev.biuRev) to an 8-character
+	 * ASCII hex string for the hardware version: each pass converts
+	 * one nibble, least significant first, filling right to left.
+	 */
+	incr = vp->rev.biuRev;
+	for (i = 0; i < 8; i++) {
+		j = (incr & 0xf);
+		if (j <= 9)
+			ae->un.AttrString[7 - i] =
+			    (char)((uint8_t) 0x30 +
+				   (uint8_t) j);
+		else
+			ae->un.AttrString[7 - i] =
+			    (char)((uint8_t) 0x61 +
+				   (uint8_t) (j - 10));
+		incr = (incr >> 4);
+	}
+	size = FOURBYTES + 8;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
+			    struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, lpfc_release_version,
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
+			   struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+	else
+		strncpy(ae->un.AttrString, phba->OptionROMVersion,
+			sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
+			   struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
+			  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
+		 init_utsname()->sysname,
+		 init_utsname()->release,
+		 init_utsname()->version);
+
+	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
+			  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	ae->un.AttrInt =  cpu_to_be32(LPFC_MAX_CT_SIZE);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
+				 struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	len = lpfc_vport_symbolic_node_name(vport,
+				ae->un.AttrString, 256);
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
+			       struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	/* Nothing is defined for this currently */
+	ae->un.AttrInt =  cpu_to_be32(0);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
+			     struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	/* Each driver instance corresponds to a single port */
+	ae->un.AttrInt =  cpu_to_be32(1);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
+			       struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
+	       sizeof(struct lpfc_name));
+	size = FOURBYTES + sizeof(struct lpfc_name);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
+			    struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
+			      struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	/* Driver doesn't have access to this information */
+	ae->un.AttrInt =  cpu_to_be32(0);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE);
+	return size;
+}
+
+static int
+lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
+			     struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, "EMULEX",
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID);
+	return size;
+}
+
+/* Routines for all individual PORT attributes */
+static int
+lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
+			    struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
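+	/* The FC-4 TYPEs attribute is a 256-bit map stored as eight
+	 * big-endian 32-bit words: FC-4 type T sets bit (T mod 32) of
+	 * word (T / 32), which gives the byte/bit pairs below.
+	 */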
+	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+	if (vport->nvmei_support || vport->phba->nvmet_support)
+		ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+	ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+	size = FOURBYTES + 32;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
+				  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	ae->un.AttrInt = 0;
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		if (phba->lmt & LMT_32Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+		if (phba->lmt & LMT_16Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
+		if (phba->lmt & LMT_10Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
+		if (phba->lmt & LMT_8Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
+		if (phba->lmt & LMT_4Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
+		if (phba->lmt & LMT_2Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
+		if (phba->lmt & LMT_1Gb)
+			ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+	} else {
+		/* FCoE links support only one speed */
+		switch (phba->fc_linkspeed) {
+		case LPFC_ASYNC_LINK_SPEED_10GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_10GE;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_25GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_25GE;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_40GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_40GE;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_100GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_100GE;
+			break;
+		}
+	}
+	ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
+			  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		switch (phba->fc_linkspeed) {
+		case LPFC_LINK_SPEED_1GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_1GFC;
+			break;
+		case LPFC_LINK_SPEED_2GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_2GFC;
+			break;
+		case LPFC_LINK_SPEED_4GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_4GFC;
+			break;
+		case LPFC_LINK_SPEED_8GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_8GFC;
+			break;
+		case LPFC_LINK_SPEED_10GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_10GFC;
+			break;
+		case LPFC_LINK_SPEED_16GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_16GFC;
+			break;
+		case LPFC_LINK_SPEED_32GHZ:
+			ae->un.AttrInt = HBA_PORTSPEED_32GFC;
+			break;
+		default:
+			ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+			break;
+		}
+	} else {
+		switch (phba->fc_linkspeed) {
+		case LPFC_ASYNC_LINK_SPEED_10GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_10GE;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_25GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_25GE;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_40GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_40GE;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_100GBPS:
+			ae->un.AttrInt = HBA_PORTSPEED_100GE;
+			break;
+		default:
+			ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+			break;
+		}
+	}
+
+	ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
+			      struct lpfc_fdmi_attr_def *ad)
+{
+	struct serv_parm *hsp;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	hsp = (struct serv_parm *)&vport->fc_sparam;
+	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+			  (uint32_t) hsp->cmn.bbRcvSizeLsb;
+	ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
+			       struct lpfc_fdmi_attr_def *ad)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
+		 "/sys/class/scsi_host/host%d", shost->host_no);
+	len = strnlen((char *)ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
+			      struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+		 init_utsname()->nodename);
+
+	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
+			 struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
+	       sizeof(struct lpfc_name));
+	size = FOURBYTES + sizeof(struct lpfc_name);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_NODENAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
+			 struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
+	       sizeof(struct lpfc_name));
+	size = FOURBYTES + sizeof(struct lpfc_name);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
+				  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
+			      struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+		ae->un.AttrInt =  cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
+	else
+		ae->un.AttrInt =  cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
+			  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
+				struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
+	       sizeof(struct lpfc_name));
+	size = FOURBYTES + sizeof(struct lpfc_name);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
+				   struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+	if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+	ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+	size = FOURBYTES + 32;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
+			       struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	/* Link Up - operational */
+	ae->un.AttrInt =  cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
+			     struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	vport->fdmi_num_disc = lpfc_find_map_node(vport);
+	ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
+	return size;
+}
+
+static int
+lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
+			    struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	ae->un.AttrInt =  cpu_to_be32(vport->fc_myDID);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
+			     struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, "Smart SAN Initiator",
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
+			  struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
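+	/* The Smart SAN GUID is the 16-byte concatenation of the port's
+	 * WWNN followed by its WWPN.
+	 */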
+	memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
+	       sizeof(struct lpfc_name));
+	memcpy((((uint8_t *)&ae->un.AttrString) +
+		sizeof(struct lpfc_name)),
+		&vport->fc_sparam.portName, sizeof(struct lpfc_name));
+	size = FOURBYTES + (2 * sizeof(struct lpfc_name));
+	ad->AttrLen =  cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_GUID);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
+			     struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString,
+			  sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen =  cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
+			   struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t len, size;
+
+	ae = &ad->AttrValue;
+	memset(ae, 0, sizeof(*ae));
+
+	strncpy(ae->un.AttrString, phba->ModelName,
+		sizeof(ae->un.AttrString));
+	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+	len += (len & 3) ? (4 - (len & 3)) : 4;
+	size = FOURBYTES + len;
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
+			       struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+
+	/* SRIOV (type 3) is not supported */
+	if (vport->vpi)
+		ae->un.AttrInt =  cpu_to_be32(2);  /* NPIV */
+	else
+		ae->un.AttrInt =  cpu_to_be32(1);  /* Physical */
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
+			 struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	ae->un.AttrInt =  cpu_to_be32(0);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_QOS);
+	return size;
+}
+
+static int
+lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
+			      struct lpfc_fdmi_attr_def *ad)
+{
+	struct lpfc_fdmi_attr_entry *ae;
+	uint32_t size;
+
+	ae = &ad->AttrValue;
+	ae->un.AttrInt =  cpu_to_be32(1);
+	size = FOURBYTES + sizeof(uint32_t);
+	ad->AttrLen = cpu_to_be16(size);
+	ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
+	return size;
+}
+
+/* RHBA attribute jump table */
+int (*lpfc_fdmi_hba_action[])
+	(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+	/* Action routine                 Mask bit     Attribute type */
+	lpfc_fdmi_hba_attr_wwnn,	  /* bit0     RHBA_NODENAME           */
+	lpfc_fdmi_hba_attr_manufacturer,  /* bit1     RHBA_MANUFACTURER       */
+	lpfc_fdmi_hba_attr_sn,		  /* bit2     RHBA_SERIAL_NUMBER      */
+	lpfc_fdmi_hba_attr_model,	  /* bit3     RHBA_MODEL              */
+	lpfc_fdmi_hba_attr_description,	  /* bit4     RHBA_MODEL_DESCRIPTION  */
+	lpfc_fdmi_hba_attr_hdw_ver,	  /* bit5     RHBA_HARDWARE_VERSION   */
+	lpfc_fdmi_hba_attr_drvr_ver,	  /* bit6     RHBA_DRIVER_VERSION     */
+	lpfc_fdmi_hba_attr_rom_ver,	  /* bit7     RHBA_OPTION_ROM_VERSION */
+	lpfc_fdmi_hba_attr_fmw_ver,	  /* bit8     RHBA_FIRMWARE_VERSION   */
+	lpfc_fdmi_hba_attr_os_ver,	  /* bit9     RHBA_OS_NAME_VERSION    */
+	lpfc_fdmi_hba_attr_ct_len,	  /* bit10    RHBA_MAX_CT_PAYLOAD_LEN */
+	lpfc_fdmi_hba_attr_symbolic_name, /* bit11    RHBA_SYM_NODENAME       */
+	lpfc_fdmi_hba_attr_vendor_info,	  /* bit12    RHBA_VENDOR_INFO        */
+	lpfc_fdmi_hba_attr_num_ports,	  /* bit13    RHBA_NUM_PORTS          */
+	lpfc_fdmi_hba_attr_fabric_wwnn,	  /* bit14    RHBA_FABRIC_WWNN        */
+	lpfc_fdmi_hba_attr_bios_ver,	  /* bit15    RHBA_BIOS_VERSION       */
+	lpfc_fdmi_hba_attr_bios_state,	  /* bit16    RHBA_BIOS_STATE         */
+	lpfc_fdmi_hba_attr_vendor_id,	  /* bit17    RHBA_VENDOR_ID          */
+};
+
+/* RPA / RPRT attribute jump table */
+int (*lpfc_fdmi_port_action[])
+	(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+	/* Action routine                   Mask bit   Attribute type */
+	lpfc_fdmi_port_attr_fc4type,        /* bit0   RPRT_SUPPORT_FC4_TYPES  */
+	lpfc_fdmi_port_attr_support_speed,  /* bit1   RPRT_SUPPORTED_SPEED    */
+	lpfc_fdmi_port_attr_speed,          /* bit2   RPRT_PORT_SPEED         */
+	lpfc_fdmi_port_attr_max_frame,      /* bit3   RPRT_MAX_FRAME_SIZE     */
+	lpfc_fdmi_port_attr_os_devname,     /* bit4   RPRT_OS_DEVICE_NAME     */
+	lpfc_fdmi_port_attr_host_name,      /* bit5   RPRT_HOST_NAME          */
+	lpfc_fdmi_port_attr_wwnn,           /* bit6   RPRT_NODENAME           */
+	lpfc_fdmi_port_attr_wwpn,           /* bit7   RPRT_PORTNAME           */
+	lpfc_fdmi_port_attr_symbolic_name,  /* bit8   RPRT_SYM_PORTNAME       */
+	lpfc_fdmi_port_attr_port_type,      /* bit9   RPRT_PORT_TYPE          */
+	lpfc_fdmi_port_attr_class,          /* bit10  RPRT_SUPPORTED_CLASS    */
+	lpfc_fdmi_port_attr_fabric_wwpn,    /* bit11  RPRT_FABRICNAME         */
+	lpfc_fdmi_port_attr_active_fc4type, /* bit12  RPRT_ACTIVE_FC4_TYPES   */
+	lpfc_fdmi_port_attr_port_state,     /* bit13  RPRT_PORT_STATE         */
+	lpfc_fdmi_port_attr_num_disc,       /* bit14  RPRT_DISC_PORT          */
+	lpfc_fdmi_port_attr_nportid,        /* bit15  RPRT_PORT_ID            */
+	lpfc_fdmi_smart_attr_service,       /* bit16  RPRT_SMART_SERVICE      */
+	lpfc_fdmi_smart_attr_guid,          /* bit17  RPRT_SMART_GUID         */
+	lpfc_fdmi_smart_attr_version,       /* bit18  RPRT_SMART_VERSION      */
+	lpfc_fdmi_smart_attr_model,         /* bit19  RPRT_SMART_MODEL        */
+	lpfc_fdmi_smart_attr_port_info,     /* bit20  RPRT_SMART_PORT_INFO    */
+	lpfc_fdmi_smart_attr_qos,           /* bit21  RPRT_SMART_QOS          */
+	lpfc_fdmi_smart_attr_security,      /* bit22  RPRT_SMART_SECURITY     */
+};
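+/*
+ * A caller can register a single attribute by passing an explicit mask to
+ * lpfc_fdmi_cmd() below; for example, lpfc_fdmi_num_disc_check() above
+ * refreshes only the discovered-port count:
+ *
+ *	lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
+ *		      LPFC_FDMI_PORT_ATTR_num_disc);
+ *
+ * A zero mask selects the vport's default fdmi_port_mask (or
+ * fdmi_hba_mask for RHBA registrations).
+ */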
+
+/**
+ * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
+ * @cmdcode: FDMI command to send
+ * @new_mask: Mask of HBA or PORT Attributes to send
+ *
+ * Builds and sends an FDMI command using the CT subsystem.
+ */
+int
+lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      int cmdcode, uint32_t new_mask)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *mp, *bmp;
+	struct lpfc_sli_ct_request *CtReq;
+	struct ulp_bde64 *bpl;
+	uint32_t bit_pos;
+	uint32_t size;
+	uint32_t rsp_size;
+	uint32_t mask;
+	struct lpfc_fdmi_reg_hba *rh;
+	struct lpfc_fdmi_port_entry *pe;
+	struct lpfc_fdmi_reg_portattr *pab = NULL;
+	struct lpfc_fdmi_attr_block *ab = NULL;
+	int  (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad);
+	void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+		     struct lpfc_iocbq *);
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return 0;
+
+	cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
+
+	/* fill in BDEs for command */
+	/* Allocate buffer for command payload */
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp)
+		goto fdmi_cmd_exit;
+
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+	if (!mp->virt)
+		goto fdmi_cmd_free_mp;
+
+	/* Allocate buffer for Buffer ptr list */
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp)
+		goto fdmi_cmd_free_mpvirt;
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
+	if (!bmp->virt)
+		goto fdmi_cmd_free_bmp;
+
+	INIT_LIST_HEAD(&mp->list);
+	INIT_LIST_HEAD(&bmp->list);
+
+	/* FDMI request */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0218 FDMI Request Data: x%x x%x x%x\n",
+			 vport->fc_flag, vport->port_state, cmdcode);
+	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+
+	/* First populate the CT_IU preamble */
+	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	CtReq->RevisionId.bits.InId = 0;
+
+	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+
+	CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+	rsp_size = LPFC_BPL_SIZE;
+	size = 0;
+
+	/* Next fill in the specific FDMI cmd information */
+	switch (cmdcode) {
+	case SLI_MGMT_RHAT:
+	case SLI_MGMT_RHBA:
+		rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+		/* HBA Identifier */
+		memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
+		       sizeof(struct lpfc_name));
+
+		if (cmdcode == SLI_MGMT_RHBA) {
+			/* Registered Port List */
+			/* One entry (port) per adapter */
+			rh->rpl.EntryCnt = cpu_to_be32(1);
+			memcpy(&rh->rpl.pe.PortName,
+			       &phba->pport->fc_sparam.portName,
+			       sizeof(struct lpfc_name));
+
+			/* point to the HBA attribute block */
+			size = 2 * sizeof(struct lpfc_name) +
+				FOURBYTES;
+		} else {
+			size = sizeof(struct lpfc_name);
+		}
+		ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
+		ab->EntryCnt = 0;
+		size += FOURBYTES;
+		bit_pos = 0;
+		if (new_mask)
+			mask = new_mask;
+		else
+			mask = vport->fdmi_hba_mask;
+
+		/* Mask will dictate what attributes to build in the request */
+		while (mask) {
+			if (mask & 0x1) {
+				func = lpfc_fdmi_hba_action[bit_pos];
+				size += func(vport,
+					     (struct lpfc_fdmi_attr_def *)
+					     ((uint8_t *)rh + size));
+				ab->EntryCnt++;
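+				/* Leave 256 bytes of headroom so the
+				 * next attribute cannot overflow the BPL;
+				 * the RPRT/RPA loop below does the same.
+				 */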
+				if ((size + 256) >
+				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+					goto hba_out;
+			}
+			mask = mask >> 1;
+			bit_pos++;
+		}
+hba_out:
+		ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
+		/* Total size */
+		size = GID_REQUEST_SZ - 4 + size;
+		break;
+
+	case SLI_MGMT_RPRT:
+	case SLI_MGMT_RPA:
+		pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
+		if (cmdcode == SLI_MGMT_RPRT) {
+			rh = (struct lpfc_fdmi_reg_hba *)pab;
+			/* HBA Identifier */
+			memcpy(&rh->hi.PortName,
+			       &phba->pport->fc_sparam.portName,
+			       sizeof(struct lpfc_name));
+			pab = (struct lpfc_fdmi_reg_portattr *)
+				((uint8_t *)pab +  sizeof(struct lpfc_name));
+		}
+
+		memcpy((uint8_t *)&pab->PortName,
+		       (uint8_t *)&vport->fc_sparam.portName,
+		       sizeof(struct lpfc_name));
+		size += sizeof(struct lpfc_name) + FOURBYTES;
+		pab->ab.EntryCnt = 0;
+		bit_pos = 0;
+		if (new_mask)
+			mask = new_mask;
+		else
+			mask = vport->fdmi_port_mask;
+
+		/* Mask will dictate what attributes to build in the request */
+		while (mask) {
+			if (mask & 0x1) {
+				func = lpfc_fdmi_port_action[bit_pos];
+				size += func(vport,
+					     (struct lpfc_fdmi_attr_def *)
+					     ((uint8_t *)pab + size));
+				pab->ab.EntryCnt++;
+				if ((size + 256) >
+				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
+					goto port_out;
+			}
+			mask = mask >> 1;
+			bit_pos++;
+		}
+port_out:
+		pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
+		/* Total size */
+		if (cmdcode == SLI_MGMT_RPRT)
+			size += sizeof(struct lpfc_name);
+		size = GID_REQUEST_SZ - 4 + size;
+		break;
+
+	case SLI_MGMT_GHAT:
+	case SLI_MGMT_GRPL:
+		rsp_size = FC_MAX_NS_RSP;
+		/* fall through */
+	case SLI_MGMT_DHBA:
+	case SLI_MGMT_DHAT:
+		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+		memcpy((uint8_t *)&pe->PortName,
+		       (uint8_t *)&vport->fc_sparam.portName,
+		       sizeof(struct lpfc_name));
+		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
+		break;
+
+	case SLI_MGMT_GPAT:
+	case SLI_MGMT_GPAS:
+		rsp_size = FC_MAX_NS_RSP;
+		/* fall through */
+	case SLI_MGMT_DPRT:
+	case SLI_MGMT_DPA:
+		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+		memcpy((uint8_t *)&pe->PortName,
+		       (uint8_t *)&vport->fc_sparam.portName,
+		       sizeof(struct lpfc_name));
+		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
+		break;
+	case SLI_MGMT_GRHL:
+		size = GID_REQUEST_SZ - 4;
+		break;
+	default:
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+				 "0298 FDMI cmdcode x%x not supported\n",
+				 cmdcode);
+		goto fdmi_cmd_free_bmpvirt;
+	}
+	CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
+
+	bpl = (struct ulp_bde64 *)bmp->virt;
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+	bpl->tus.f.bdeFlags = 0;
+	bpl->tus.f.bdeSize = size;
+
+	/*
+	 * lpfc_ct_cmd/lpfc_get_req will increment the ndlp reference count
+	 * to hold an ndlp reference for the corresponding callback function.
+	 */
+	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+		return 0;
+
+	/*
+	 * Decrement ndlp reference count to release ndlp reference held
+	 * for the failed command's callback function.
+	 */
+	lpfc_nlp_put(ndlp);
+
+fdmi_cmd_free_bmpvirt:
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+fdmi_cmd_free_bmp:
+	kfree(bmp);
+fdmi_cmd_free_mpvirt:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+fdmi_cmd_free_mp:
+	kfree(mp);
+fdmi_cmd_exit:
+	/* Issue FDMI request failed */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0244 Issue FDMI request failed Data: x%x\n",
+			 cmdcode);
+	return 1;
+}
+
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: Context object of the timer.
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ *      handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts nport discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+	vport->fc_flag &= ~FC_DISC_DELAYED;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
+void
+lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	lpfc_vpd_t *vp = &phba->vpd;
+	uint32_t b1, b2, b3, b4, i, rev;
+	char c;
+	uint32_t *ptr, str[4];
+	uint8_t *fwname;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
+	else if (vp->rev.rBit) {
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
+			rev = vp->rev.sli2FwRev;
+		else
+			rev = vp->rev.sli1FwRev;
+
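+		/* Decode the SLI firmware revision word: bits 15:12 major,
+		 * 11:8 minor, 7:6 fix level, 5:4 release-type letter
+		 * (N/A/B/X) and 3:0 release number.
+		 */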
+		b1 = (rev & 0x0000f000) >> 12;
+		b2 = (rev & 0x00000f00) >> 8;
+		b3 = (rev & 0x000000c0) >> 6;
+		b4 = (rev & 0x00000030) >> 4;
+
+		switch (b4) {
+		case 0:
+			c = 'N';
+			break;
+		case 1:
+			c = 'A';
+			break;
+		case 2:
+			c = 'B';
+			break;
+		case 3:
+			c = 'X';
+			break;
+		default:
+			c = 0;
+			break;
+		}
+		b4 = (rev & 0x0000000f);
+
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
+			fwname = vp->rev.sli2FwName;
+		else
+			fwname = vp->rev.sli1FwName;
+
+		for (i = 0; i < 16; i++)
+			if (fwname[i] == 0x20)
+				fwname[i] = 0;
+
+		ptr = (uint32_t *)fwname;
+
+		for (i = 0; i < 3; i++)
+			str[i] = be32_to_cpu(*ptr++);
+
+		if (c == 0) {
+			if (flag)
+				sprintf(fwrevision, "%d.%d%d (%s)",
+					b1, b2, b3, (char *)str);
+			else
+				sprintf(fwrevision, "%d.%d%d", b1,
+					b2, b3);
+		} else {
+			if (flag)
+				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
+					b1, b2, b3, c,
+					b4, (char *)str);
+			else
+				sprintf(fwrevision, "%d.%d%d%c%d",
+					b1, b2, b3, c, b4);
+		}
+	} else {
+		rev = vp->rev.smFwRev;
+
+		b1 = (rev & 0xff000000) >> 24;
+		b2 = (rev & 0x00f00000) >> 20;
+		b3 = (rev & 0x000f0000) >> 16;
+		c  = (rev & 0x0000ff00) >> 8;
+		b4 = (rev & 0x000000ff);
+
+		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
+	}
+	return;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_debugfs.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 0000000..d50c481
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,5927 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2007-2015 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_bsg.h"
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/*
+ * debugfs interface
+ *
+ * To access this interface the user should:
+ * # mount -t debugfs none /sys/kernel/debug
+ *
+ * The lpfc debugfs directory hierarchy is:
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
+ * where Y is the vport VPI on that hba
+ *
+ * Debugging services available per vport:
+ * discovery_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
+ * See lpfc_debugfs.h for the different categories of discovery events.
+ * To enable the discovery trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_disc_trc=X   Where X is the event trace depth for
+ *                               EACH vport. X MUST also be a power of 2.
+ * lpfc_debugfs_mask_disc_trc=Y  Where Y is an event mask as defined in
+ *                               lpfc_debugfs.h.
+ *
+ * slow_ring_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
+ * To enable the slow ring trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_slow_ring_trc=X   Where X is the event trace depth for
+ *                               the HBA. X MUST also be a power of 2.
+ */
+static int lpfc_debugfs_enable = 1;
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_disc_trc;
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
+	"Set debugfs discovery trace depth");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_slow_ring_trc;
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
+	"Set debugfs slow ring trace depth");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_nvmeio_trc;
+module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444);
+MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc,
+		 "Set debugfs NVME IO trace depth");
+
+static int lpfc_debugfs_mask_disc_trc;
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+	"Set debugfs discovery trace mask");
+
+#include <linux/debugfs.h>
+
+static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+static unsigned long lpfc_debugfs_start_time = 0L;
+
+/* iDiag */
+static struct lpfc_idiag idiag;
+
+/**
+ * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
+ * @vport: The vport to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc discovery debugfs data from the @vport and
+ * dumps it to @buf, up to @size bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Discovery logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	int i, index, len, enable;
+	uint32_t ms;
+	struct lpfc_debugfs_trc *dtp;
+	char *buffer;
+
+	buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+	if (!buffer)
+		return 0;
+
+	enable = lpfc_debugfs_enable;
+	lpfc_debugfs_enable = 0;
+
+	len = 0;
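+	/* disc_trc is a ring buffer; lpfc_debugfs_max_disc_trc is a power
+	 * of two, so masking with (max - 1) yields the oldest entry, one
+	 * slot past the most recently written one.
+	 */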
+	index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+		(lpfc_debugfs_max_disc_trc - 1);
+	for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+		dtp = vport->disc_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+	for (i = 0; i < index; i++) {
+		dtp = vport->disc_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+
+	lpfc_debugfs_enable = enable;
+	kfree(buffer);
+
+	return len;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
+ * @phba: The HBA to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc slow ring debugfs data from the @phba and
+ * dumps it to @buf, up to @size bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Slow ring logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int i, index, len, enable;
+	uint32_t ms;
+	struct lpfc_debugfs_trc *dtp;
+	char *buffer;
+
+	buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+	if (!buffer)
+		return 0;
+
+	enable = lpfc_debugfs_enable;
+	lpfc_debugfs_enable = 0;
+
+	len = 0;
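+	/* Same power-of-two ring walk as the discovery trace above. */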
+	index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
+		(lpfc_debugfs_max_slow_ring_trc - 1);
+	for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+		dtp = phba->slow_ring_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+	for (i = 0; i < index; i++) {
+		dtp = phba->slow_ring_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+
+	lpfc_debugfs_enable = enable;
+	kfree(buffer);
+
+	return len;
+}
+
+static int lpfc_debugfs_last_hbq = -1;
+
+/**
+ * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the host buffer queue info from the @phba to @buf up to
+ * @size bytes. A header that describes the current hbq state will be
+ * dumped to @buf first and then info on each hbq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hbq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured HBQ each time called.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int len = 0;
+	int i, j, found, posted, low;
+	uint32_t phys, raw_index, getidx;
+	struct lpfc_hbq_init *hip;
+	struct hbq_s *hbqs;
+	struct lpfc_hbq_entry *hbqe;
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *hbq_buf;
+
+	if (phba->sli_rev != 3)
+		return 0;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* toggle between multiple hbqs, if any */
+	i = lpfc_sli_hbq_count();
+	if (i > 1) {
+		lpfc_debugfs_last_hbq++;
+		if (lpfc_debugfs_last_hbq >= i)
+			lpfc_debugfs_last_hbq = 0;
+	} else {
+		lpfc_debugfs_last_hbq = 0;
+	}
+
+	i = lpfc_debugfs_last_hbq;
+
+	len +=  snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+
+	hbqs =  &phba->hbqs[i];
+	posted = 0;
+	list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
+		posted++;
+
+	hip =  lpfc_hbq_defs[i];
+	len +=  snprintf(buf+len, size-len,
+		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
+		hip->hbq_index, hip->profile, hip->rn,
+		hip->buffer_count, hip->init_count, hip->add_count, posted);
+
+	raw_index = phba->hbq_get[i];
+	getidx = le32_to_cpu(raw_index);
+	len +=  snprintf(buf+len, size-len,
+		"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
+		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
+
+	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
+	for (j = 0; j < hbqs->entry_count; j++) {
+		len +=  snprintf(buf+len, size-len,
+			"%03d: %08x %04x %05x ", j,
+			le32_to_cpu(hbqe->bde.addrLow),
+			le32_to_cpu(hbqe->bde.tus.w),
+			le32_to_cpu(hbqe->buffer_tag));
+		i = 0;
+		found = 0;
+
+		/* First calculate if slot has an associated posted buffer */
+		low = hbqs->hbqPutIdx - posted;
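+		/* Posted buffers occupy slots [low, hbqPutIdx); a negative
+		 * low means the window wraps around the end of the ring.
+		 */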
+		if (low >= 0) {
+			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
+				len +=  snprintf(buf+len, size-len, "Unused\n");
+				goto skipit;
+			}
+		} else {
+			if ((j >= hbqs->hbqPutIdx) &&
+				(j < (hbqs->entry_count+low))) {
+				len +=  snprintf(buf+len, size-len, "Unused\n");
+				goto skipit;
+			}
+		}
+
+		/* Get the Buffer info for the posted buffer */
+		list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
+			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
+				len +=  snprintf(buf+len, size-len,
+					"Buf%d: %p %06x\n", i,
+					hbq_buf->dbuf.virt, hbq_buf->tag);
+				found = 1;
+				break;
+			}
+			i++;
+		}
+		if (!found)
+			len +=  snprintf(buf+len, size-len, "No DMAinfo?\n");
+skipit:
+		hbqe++;
+		if (len > LPFC_HBQINFO_SIZE - 54)
+			break;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return len;
+}
+
+static int lpfc_debugfs_last_hba_slim_off;
+
+/**
+ * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
+ * @phba: The HBA to gather SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of HBA SLIM for the HBA associated
+ * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
+ *
+ * Notes:
+ * This routine will only dump up to 1024 bytes of data each time called and
+ * should be called multiple times to dump the entire HBA SLIM.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int len = 0;
+	int i, off;
+	uint32_t *ptr;
+	char *buffer;
+
+	buffer = kmalloc(1024, GFP_KERNEL);
+	if (!buffer)
+		return 0;
+
+	off = 0;
+	spin_lock_irq(&phba->hbalock);
+
+	len +=  snprintf(buf+len, size-len, "HBA SLIM\n");
+	lpfc_memcpy_from_slim(buffer,
+		phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
+
+	ptr = (uint32_t *)&buffer[0];
+	off = lpfc_debugfs_last_hba_slim_off;
+
+	/* Set it up for the next time */
+	lpfc_debugfs_last_hba_slim_off += 1024;
+	if (lpfc_debugfs_last_hba_slim_off >= 4096)
+		lpfc_debugfs_last_hba_slim_off = 0;
+
+	i = 1024;
+	while (i > 0) {
+		len +=  snprintf(buf+len, size-len,
+		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+		*(ptr+5), *(ptr+6), *(ptr+7));
+		ptr += 8;
+		i -= (8 * sizeof(uint32_t));
+		off += (8 * sizeof(uint32_t));
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	kfree(buffer);
+
+	return len;
+}
+
+/**
+ * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
+ * @phba: The HBA to gather Host SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of host SLIM for the host associated
+ * with @phba to @buf up to @size bytes of data. The dump will contain the
+ * Mailbox, PCB, Rings, and Registers that are located in host memory.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int len = 0;
+	int i, off;
+	uint32_t word0, word1, word2, word3;
+	uint32_t *ptr;
+	struct lpfc_pgp *pgpp;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	off = 0;
+	spin_lock_irq(&phba->hbalock);
+
+	len +=  snprintf(buf+len, size-len, "SLIM Mailbox\n");
+	ptr = (uint32_t *)phba->slim2p.virt;
+	i = sizeof(MAILBOX_t);
+	while (i > 0) {
+		len +=  snprintf(buf+len, size-len,
+		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+		*(ptr+5), *(ptr+6), *(ptr+7));
+		ptr += 8;
+		i -= (8 * sizeof(uint32_t));
+		off += (8 * sizeof(uint32_t));
+	}
+
+	len +=  snprintf(buf+len, size-len, "SLIM PCB\n");
+	ptr = (uint32_t *)phba->pcb;
+	i = sizeof(PCB_t);
+	while (i > 0) {
+		len +=  snprintf(buf+len, size-len,
+		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+		*(ptr+5), *(ptr+6), *(ptr+7));
+		ptr += 8;
+		i -= (8 * sizeof(uint32_t));
+		off += (8 * sizeof(uint32_t));
+	}
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		for (i = 0; i < 4; i++) {
+			pgpp = &phba->port_gp[i];
+			pring = &psli->sli3_ring[i];
+			len +=  snprintf(buf+len, size-len,
+					 "Ring %d: CMD GetInx:%d "
+					 "(Max:%d Next:%d "
+					 "Local:%d flg:x%x)  "
+					 "RSP PutInx:%d Max:%d\n",
+					 i, pgpp->cmdGetInx,
+					 pring->sli.sli3.numCiocb,
+					 pring->sli.sli3.next_cmdidx,
+					 pring->sli.sli3.local_getidx,
+					 pring->flag, pgpp->rspPutInx,
+					 pring->sli.sli3.numRiocb);
+		}
+
+		word0 = readl(phba->HAregaddr);
+		word1 = readl(phba->CAregaddr);
+		word2 = readl(phba->HSregaddr);
+		word3 = readl(phba->HCregaddr);
+		len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+				 "HC:%08x\n", word0, word1, word2, word3);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return len;
+}
+
+/**
+ * lpfc_debugfs_nodelist_data - Dump target node list to a buffer
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current target node list associated with @vport to
+ * @buf up to @size bytes of data. Each node entry in the dump will contain a
+ * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	int len = 0;
+	int cnt;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_nodelist *ndlp;
+	unsigned char *statep;
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvme_fc_remote_port *nrport;
+
+	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+
+	len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!cnt) {
+			len +=  snprintf(buf+len, size-len,
+				"Missing Nodelist Entries\n");
+			break;
+		}
+		cnt--;
+		switch (ndlp->nlp_state) {
+		case NLP_STE_UNUSED_NODE:
+			statep = "UNUSED";
+			break;
+		case NLP_STE_PLOGI_ISSUE:
+			statep = "PLOGI ";
+			break;
+		case NLP_STE_ADISC_ISSUE:
+			statep = "ADISC ";
+			break;
+		case NLP_STE_REG_LOGIN_ISSUE:
+			statep = "REGLOG";
+			break;
+		case NLP_STE_PRLI_ISSUE:
+			statep = "PRLI  ";
+			break;
+		case NLP_STE_LOGO_ISSUE:
+			statep = "LOGO  ";
+			break;
+		case NLP_STE_UNMAPPED_NODE:
+			statep = "UNMAP ";
+			break;
+		case NLP_STE_MAPPED_NODE:
+			statep = "MAPPED";
+			break;
+		case NLP_STE_NPR_NODE:
+			statep = "NPR   ";
+			break;
+		default:
+			statep = "UNKNOWN";
+		}
+		len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+				statep, ndlp->nlp_DID);
+		len += snprintf(buf+len, size-len,
+				"WWPN x%llx ",
+				wwn_to_u64(ndlp->nlp_portname.u.wwn));
+		len += snprintf(buf+len, size-len,
+				"WWNN x%llx ",
+				wwn_to_u64(ndlp->nlp_nodename.u.wwn));
+		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
+			len += snprintf(buf+len, size-len, "RPI:%03d ",
+					ndlp->nlp_rpi);
+		else
+			len += snprintf(buf+len, size-len, "RPI:none ");
+		len +=  snprintf(buf+len, size-len, "flag:x%08x ",
+			ndlp->nlp_flag);
+		if (!ndlp->nlp_type)
+			len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+		if (ndlp->nlp_type & NLP_FC_NODE)
+			len += snprintf(buf+len, size-len, "FC_NODE ");
+		if (ndlp->nlp_type & NLP_FABRIC)
+			len += snprintf(buf+len, size-len, "FABRIC ");
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+				ndlp->nlp_sid);
+		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+			len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+		if (ndlp->nlp_type & NLP_NVME_TARGET)
+			len += snprintf(buf + len,
+					size - len, "NVME_TGT sid:%d ",
+					NLP_NO_SID);
+		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+			len += snprintf(buf + len,
+					size - len, "NVME_INITIATOR ");
+		len += snprintf(buf+len, size-len, "usgmap:%x ",
+			ndlp->nlp_usg_map);
+		len += snprintf(buf+len, size-len, "refcnt:%x",
+			kref_read(&ndlp->kref));
+		len +=  snprintf(buf+len, size-len, "\n");
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+		len += snprintf(buf + len, size - len,
+				"\nNVME Targetport Entry ...\n");
+
+		/* Port state is only one of two values for now. */
+		if (phba->targetport->port_id)
+			statep = "REGISTERED";
+		else
+			statep = "INIT";
+		len += snprintf(buf + len, size - len,
+				"TGT WWNN x%llx WWPN x%llx State %s\n",
+				wwn_to_u64(vport->fc_nodename.u.wwn),
+				wwn_to_u64(vport->fc_portname.u.wwn),
+				statep);
+		len += snprintf(buf + len, size - len,
+				"    Targetport DID x%06x\n",
+				phba->targetport->port_id);
+		goto out_exit;
+	}
+
+	len += snprintf(buf + len, size - len,
+				"\nNVME Lport/Rport Entries ...\n");
+
+	localport = vport->localport;
+	if (!localport)
+		goto out_exit;
+
+	spin_lock_irq(shost->host_lock);
+
+	/* Port state is only one of two values for now. */
+	if (localport->port_id)
+		statep = "ONLINE";
+	else
+		statep = "UNKNOWN ";
+
+	len += snprintf(buf + len, size - len,
+			"Lport DID x%06x PortState %s\n",
+			localport->port_id, statep);
+
+	len += snprintf(buf + len, size - len, "\tRport List:\n");
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		/* local short-hand pointer. */
+		if (!ndlp->nrport)
+			continue;
+
+		nrport = ndlp->nrport->remoteport;
+
+		/* Port state is only one of two values for now. */
+		switch (nrport->port_state) {
+		case FC_OBJSTATE_ONLINE:
+			statep = "ONLINE";
+			break;
+		case FC_OBJSTATE_UNKNOWN:
+			statep = "UNKNOWN ";
+			break;
+		default:
+			statep = "UNSUPPORTED";
+			break;
+		}
+
+		/* Tab in to show lport ownership. */
+		len += snprintf(buf + len, size - len,
+				"\t%s Port ID:x%06x ",
+				statep, nrport->port_id);
+		len += snprintf(buf + len, size - len, "WWPN x%llx ",
+				nrport->port_name);
+		len += snprintf(buf + len, size - len, "WWNN x%llx ",
+				nrport->node_name);
+
+		/* An NVME rport can have multiple roles. */
+		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
+			len +=  snprintf(buf + len, size - len,
+					 "INITIATOR ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
+			len +=  snprintf(buf + len, size - len,
+					 "TARGET ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
+			len +=  snprintf(buf + len, size - len,
+					 "DISCSRVC ");
+		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+					  FC_PORT_ROLE_NVME_TARGET |
+					  FC_PORT_ROLE_NVME_DISCOVERY))
+			len +=  snprintf(buf + len, size - len,
+					 "UNKNOWN ROLE x%x",
+					 nrport->port_role);
+		/* Terminate the string. */
+		len +=  snprintf(buf + len, size - len, "\n");
+	}
+
+	spin_unlock_irq(shost->host_lock);
+ out_exit:
+	return len;
+}
+
+/**
+ * lpfc_debugfs_nvmestat_data - Dump NVME statistics to a buffer
+ * @vport: The vport to gather NVME statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME statistics associated with @vport.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+	uint64_t tot, data1, data2, data3;
+	int len = 0;
+	int cnt;
+
+	if (phba->nvmet_support) {
+		if (!phba->targetport)
+			return len;
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+		len += snprintf(buf + len, size - len,
+				"\nNVME Targetport Statistics\n");
+
+		len += snprintf(buf + len, size - len,
+				"LS: Rcv %08x Drop %08x Abort %08x\n",
+				atomic_read(&tgtp->rcv_ls_req_in),
+				atomic_read(&tgtp->rcv_ls_req_drop),
+				atomic_read(&tgtp->xmt_ls_abort));
+		if (atomic_read(&tgtp->rcv_ls_req_in) !=
+		    atomic_read(&tgtp->rcv_ls_req_out)) {
+			len += snprintf(buf + len, size - len,
+					"Rcv LS: in %08x != out %08x\n",
+					atomic_read(&tgtp->rcv_ls_req_in),
+					atomic_read(&tgtp->rcv_ls_req_out));
+		}
+
+		len += snprintf(buf + len, size - len,
+				"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+				atomic_read(&tgtp->xmt_ls_rsp),
+				atomic_read(&tgtp->xmt_ls_drop),
+				atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+				atomic_read(&tgtp->xmt_ls_rsp_error));
+
+		len += snprintf(buf + len, size - len,
+				"FCP: Rcv %08x Defer %08x Release %08x "
+				"Drop %08x\n",
+				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_defer),
+				atomic_read(&tgtp->xmt_fcp_release),
+				atomic_read(&tgtp->rcv_fcp_cmd_drop));
+
+		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
+		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
+			len += snprintf(buf + len, size - len,
+					"Rcv FCP: in %08x != out %08x\n",
+					atomic_read(&tgtp->rcv_fcp_cmd_in),
+					atomic_read(&tgtp->rcv_fcp_cmd_out));
+		}
+
+		len += snprintf(buf + len, size - len,
+				"FCP Rsp: read %08x readrsp %08x "
+				"write %08x rsp %08x\n",
+				atomic_read(&tgtp->xmt_fcp_read),
+				atomic_read(&tgtp->xmt_fcp_read_rsp),
+				atomic_read(&tgtp->xmt_fcp_write),
+				atomic_read(&tgtp->xmt_fcp_rsp));
+
+		len += snprintf(buf + len, size - len,
+				"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+				atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+				atomic_read(&tgtp->xmt_fcp_rsp_error),
+				atomic_read(&tgtp->xmt_fcp_rsp_drop));
+
+		len += snprintf(buf + len, size - len,
+				"ABORT: Xmt %08x Cmpl %08x\n",
+				atomic_read(&tgtp->xmt_fcp_abort),
+				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
+
+		len += snprintf(buf + len, size - len,
+				"ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
+				atomic_read(&tgtp->xmt_abort_sol),
+				atomic_read(&tgtp->xmt_abort_unsol),
+				atomic_read(&tgtp->xmt_abort_rsp),
+				atomic_read(&tgtp->xmt_abort_rsp_error));
+
+		len +=  snprintf(buf + len, size - len, "\n");
+
+		cnt = 0;
+		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		list_for_each_entry_safe(ctxp, next_ctxp,
+				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				list) {
+			cnt++;
+		}
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		if (cnt) {
+			len += snprintf(buf + len, size - len,
+					"ABORT: %d ctx entries\n", cnt);
+			spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+			list_for_each_entry_safe(ctxp, next_ctxp,
+				    &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				    list) {
+				if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
+					break;
+				len += snprintf(buf + len, size - len,
+						"Entry: oxid %x state %x "
+						"flag %x\n",
+						ctxp->oxid, ctxp->state,
+						ctxp->flag);
+			}
+			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		}
+
+		/* Calculate outstanding IOs */
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+		tot += atomic_read(&tgtp->xmt_fcp_release);
+		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
+
+		len += snprintf(buf + len, size - len,
+				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
+				"CTX Outstanding %08llx\n",
+				phba->sli4_hba.nvmet_xri_cnt,
+				phba->sli4_hba.nvmet_io_wait_cnt,
+				phba->sli4_hba.nvmet_io_wait_total,
+				tot);
+	} else {
+		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+			return len;
+
+		len += snprintf(buf + len, size - len,
+				"\nNVME Lport Statistics\n");
+
+		len += snprintf(buf + len, size - len,
+				"LS: Xmt %016x Cmpl %016x\n",
+				atomic_read(&phba->fc4NvmeLsRequests),
+				atomic_read(&phba->fc4NvmeLsCmpls));
+
+		tot = atomic_read(&phba->fc4NvmeIoCmpls);
+		data1 = atomic_read(&phba->fc4NvmeInputRequests);
+		data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+		data3 = atomic_read(&phba->fc4NvmeControlRequests);
+
+		len += snprintf(buf + len, size - len,
+				"FCP: Rd %016llx Wr %016llx IO %016llx\n",
+				data1, data2, data3);
+
+		len += snprintf(buf + len, size - len,
+				"    Cmpl %016llx Outstanding %016llx\n",
+				tot, (data1 + data2 + data3) - tot);
+	}
+
+	return len;
+}
+
+
+/**
+ * lpfc_debugfs_nvmektime_data - Dump NVME IO latency statistics to a buffer
+ * @vport: The vport to gather NVME IO latency statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME IO latency (ktime) statistics associated with
+ * @vport.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	int len = 0;
+
+	if (phba->nvmet_support == 0) {
+		/* NVME Initiator */
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"ktime %s: Total Samples: %lld\n",
+				(phba->ktime_on ?  "Enabled" : "Disabled"),
+				phba->ktime_data_samples);
+		if (phba->ktime_data_samples == 0)
+			return len;
+
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"Segment 1: Last NVME Cmd cmpl "
+			"done -to- Start of next NVME cnd (in driver)\n");
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg1_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg1_min,
+			phba->ktime_seg1_max);
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"Segment 2: Driver start of NVME cmd "
+			"-to- Firmware WQ doorbell\n");
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg2_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg2_min,
+			phba->ktime_seg2_max);
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"Segment 3: Firmware WQ doorbell -to- "
+			"MSI-X ISR cmpl\n");
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg3_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg3_min,
+			phba->ktime_seg3_max);
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"Segment 4: MSI-X ISR cmpl -to- "
+			"NVME cmpl done\n");
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg4_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg4_min,
+			phba->ktime_seg4_max);
+		len += snprintf(
+			buf + len, PAGE_SIZE - len,
+			"Total IO avg time: %08lld\n",
+			div_u64(phba->ktime_seg1_total +
+			phba->ktime_seg2_total  +
+			phba->ktime_seg3_total +
+			phba->ktime_seg4_total,
+			phba->ktime_data_samples));
+		return len;
+	}
+
+	/* NVME Target */
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"ktime %s: Total Samples: %lld %lld\n",
+			(phba->ktime_on ? "Enabled" : "Disabled"),
+			phba->ktime_data_samples,
+			phba->ktime_status_samples);
+	if (phba->ktime_data_samples == 0)
+		return len;
+
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 1: MSI-X ISR Rcv cmd -to- "
+			"cmd pass to NVME Layer\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg1_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg1_min,
+			phba->ktime_seg1_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 2: cmd pass to NVME Layer- "
+			"-to- Driver rcv cmd OP (action)\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg2_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg2_min,
+			phba->ktime_seg2_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 3: Driver rcv cmd OP -to- "
+			"Firmware WQ doorbell: cmd\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg3_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg3_min,
+			phba->ktime_seg3_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 4: Firmware WQ doorbell: cmd "
+			"-to- MSI-X ISR for cmd cmpl\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg4_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg4_min,
+			phba->ktime_seg4_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 5: MSI-X ISR for cmd cmpl "
+			"-to- NVME layer passed cmd done\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg5_total,
+				phba->ktime_data_samples),
+			phba->ktime_seg5_min,
+			phba->ktime_seg5_max);
+
+	if (phba->ktime_status_samples == 0) {
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"Total: cmd received by MSI-X ISR "
+				"-to- cmd completed on wire\n");
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"avg:%08lld min:%08lld "
+				"max %08lld\n",
+				div_u64(phba->ktime_seg10_total,
+					phba->ktime_data_samples),
+				phba->ktime_seg10_min,
+				phba->ktime_seg10_max);
+		return len;
+	}
+
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 6: NVME layer passed cmd done "
+			"-to- Driver rcv rsp status OP\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg6_total,
+				phba->ktime_status_samples),
+			phba->ktime_seg6_min,
+			phba->ktime_seg6_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 7: Driver rcv rsp status OP "
+			"-to- Firmware WQ doorbell: status\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg7_total,
+				phba->ktime_status_samples),
+			phba->ktime_seg7_min,
+			phba->ktime_seg7_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 8: Firmware WQ doorbell: status"
+			" -to- MSI-X ISR for status cmpl\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg8_total,
+				phba->ktime_status_samples),
+			phba->ktime_seg8_min,
+			phba->ktime_seg8_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Segment 9: MSI-X ISR for status cmpl  "
+			"-to- NVME layer passed status done\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg9_total,
+				phba->ktime_status_samples),
+			phba->ktime_seg9_min,
+			phba->ktime_seg9_max);
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"Total: cmd received by MSI-X ISR -to- "
+			"cmd completed on wire\n");
+	len += snprintf(buf + len, PAGE_SIZE-len,
+			"avg:%08lld min:%08lld max %08lld\n",
+			div_u64(phba->ktime_seg10_total,
+				phba->ktime_status_samples),
+			phba->ktime_seg10_min,
+			phba->ktime_seg10_max);
+	return len;
+}
+
+/**
+ * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer
+ * @phba: The phba to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME IO trace associated with @phba
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	struct lpfc_debugfs_nvmeio_trc *dtp;
+	int i, state, index, skip;
+	int len = 0;
+
+	state = phba->nvmeio_trc_on;
+
+	index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) &
+		(phba->nvmeio_trc_size - 1);
+	skip = phba->nvmeio_trc_output_idx;
+
+	len += snprintf(buf + len, size - len,
+			"%s IO Trace %s: next_idx %d skip %d size %d\n",
+			(phba->nvmet_support ? "NVME" : "NVMET"),
+			(state ? "Enabled" : "Disabled"),
+			index, skip, phba->nvmeio_trc_size);
+
+	if (!phba->nvmeio_trc || state)
+		return len;
+
+	/* trace MUST be off to continue */
+
+	for (i = index; i < phba->nvmeio_trc_size; i++) {
+		if (skip) {
+			skip--;
+			continue;
+		}
+		dtp = phba->nvmeio_trc + i;
+		phba->nvmeio_trc_output_idx++;
+
+		if (!dtp->fmt)
+			continue;
+
+		len +=  snprintf(buf + len, size - len, dtp->fmt,
+			dtp->data1, dtp->data2, dtp->data3);
+
+		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
+			phba->nvmeio_trc_output_idx = 0;
+			len += snprintf(buf + len, size - len,
+					"Trace Complete\n");
+			goto out;
+		}
+
+		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
+			len += snprintf(buf + len, size - len,
+					"Trace Continue (%d of %d)\n",
+					phba->nvmeio_trc_output_idx,
+					phba->nvmeio_trc_size);
+			goto out;
+		}
+	}
+	for (i = 0; i < index; i++) {
+		if (skip) {
+			skip--;
+			continue;
+		}
+		dtp = phba->nvmeio_trc + i;
+		phba->nvmeio_trc_output_idx++;
+
+		if (!dtp->fmt)
+			continue;
+
+		len +=  snprintf(buf + len, size - len, dtp->fmt,
+			dtp->data1, dtp->data2, dtp->data3);
+
+		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
+			phba->nvmeio_trc_output_idx = 0;
+			len += snprintf(buf + len, size - len,
+					"Trace Complete\n");
+			goto out;
+		}
+
+		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
+			len += snprintf(buf + len, size - len,
+					"Trace Continue (%d of %d)\n",
+					phba->nvmeio_trc_output_idx,
+					phba->nvmeio_trc_size);
+			goto out;
+		}
+	}
+
+	len += snprintf(buf + len, size - len,
+			"Trace Done\n");
+out:
+	return len;
+}
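+
+/*
+ * Illustrative sketch of the wrap-around math used above: nvmeio_trc_size
+ * is kept a power of 2, so the oldest entry is found by masking. With
+ * nvmeio_trc_size = 8 and nvmeio_trc_cnt = 13:
+ *
+ *	index = (13 + 1) & (8 - 1) = 6
+ *
+ * and the dump walks entries 6..7 first, then wraps around to 0..5.
+ */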
+
+/**
+ * lpfc_debugfs_cpucheck_data - Dump per-CPU IO statistics to a buffer
+ * @vport: The vport to gather per-CPU IO statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the per-CPU NVME IO check statistics associated with
+ * @vport.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	int i;
+	int len = 0;
+	uint32_t tot_xmt = 0;
+	uint32_t tot_rcv = 0;
+	uint32_t tot_cmpl = 0;
+	uint32_t tot_ccmpl = 0;
+
+	if (phba->nvmet_support == 0) {
+		/* NVME Initiator */
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"CPUcheck %s\n",
+				(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
+					"Enabled" : "Disabled"));
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			if (i >= LPFC_CHECK_CPU_CNT)
+				break;
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"%02d: xmit x%08x cmpl x%08x\n",
+					i, phba->cpucheck_xmt_io[i],
+					phba->cpucheck_cmpl_io[i]);
+			tot_xmt += phba->cpucheck_xmt_io[i];
+			tot_cmpl += phba->cpucheck_cmpl_io[i];
+		}
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"tot:xmit x%08x cmpl x%08x\n",
+				tot_xmt, tot_cmpl);
+		return len;
+	}
+
+	/* NVME Target */
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"CPUcheck %s ",
+			(phba->cpucheck_on & LPFC_CHECK_NVMET_IO ?
+				"IO Enabled - " : "IO Disabled - "));
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"%s\n",
+			(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
+				"Rcv Enabled\n" : "Rcv Disabled\n"));
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		if (i >= LPFC_CHECK_CPU_CNT)
+			break;
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"%02d: xmit x%08x ccmpl x%08x "
+				"cmpl x%08x rcv x%08x\n",
+				i, phba->cpucheck_xmt_io[i],
+				phba->cpucheck_ccmpl_io[i],
+				phba->cpucheck_cmpl_io[i],
+				phba->cpucheck_rcv_io[i]);
+		tot_xmt += phba->cpucheck_xmt_io[i];
+		tot_rcv += phba->cpucheck_rcv_io[i];
+		tot_cmpl += phba->cpucheck_cmpl_io[i];
+		tot_ccmpl += phba->cpucheck_ccmpl_io[i];
+	}
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n",
+			tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv);
+	return len;
+}
+
+#endif
+
+/**
+ * lpfc_debugfs_disc_trc - Store discovery trace log
+ * @vport: The vport to associate this trace string with for retrieval.
+ * @mask: Log entry classification.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * discovery trace buffer associated with @vport. Only entries with a @mask that
+ * match the current debugfs discovery mask will be saved. Entries that do not
+ * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like
+ * printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+	uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_debugfs_trc *dtp;
+	int index;
+
+	if (!(lpfc_debugfs_mask_disc_trc & mask))
+		return;
+
+	if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
+		!vport || !vport->disc_trc)
+		return;
+
+	index = atomic_inc_return(&vport->disc_trc_cnt) &
+		(lpfc_debugfs_max_disc_trc - 1);
+	dtp = vport->disc_trc + index;
+	dtp->fmt = fmt;
+	dtp->data1 = data1;
+	dtp->data2 = data2;
+	dtp->data3 = data3;
+	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+	dtp->jif = jiffies;
+#endif
+	return;
+}
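+
+/*
+ * Usage sketch (hypothetical call site; the mask value is assumed to be one
+ * of the LPFC_DISC_TRC_* flags defined in lpfc_debugfs.h):
+ *
+ *	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ *		"Issue PLOGI:  did:x%x flg:x%x",
+ *		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+ *
+ * Note that only the @fmt pointer is stored, so the format string must have
+ * static lifetime.
+ */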
+
+/**
+ * lpfc_debugfs_slow_ring_trc - Store slow ring trace log
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
+	uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_debugfs_trc *dtp;
+	int index;
+
+	if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc ||
+		!phba || !phba->slow_ring_trc)
+		return;
+
+	index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
+		(lpfc_debugfs_max_slow_ring_trc - 1);
+	dtp = phba->slow_ring_trc + index;
+	dtp->fmt = fmt;
+	dtp->data1 = data1;
+	dtp->data2 = data2;
+	dtp->data3 = data3;
+	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+	dtp->jif = jiffies;
+#endif
+	return;
+}
+
+/**
+ * lpfc_debugfs_nvme_trc - Store NVME/NVMET trace log
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * nvme trace buffer associated with @phba. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt,
+		      uint16_t data1, uint16_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_debugfs_nvmeio_trc *dtp;
+	int index;
+
+	if (!phba->nvmeio_trc_on || !phba->nvmeio_trc)
+		return;
+
+	index = atomic_inc_return(&phba->nvmeio_trc_cnt) &
+		(phba->nvmeio_trc_size - 1);
+	dtp = phba->nvmeio_trc + index;
+	dtp->fmt = fmt;
+	dtp->data1 = data1;
+	dtp->data2 = data2;
+	dtp->data3 = data3;
+#endif
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_debugfs_disc_trc_open - Open the discovery trace log
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int size;
+	int rc = -ENOMEM;
+
+	if (!lpfc_debugfs_max_disc_trc) {
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	size =  (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
+	size = PAGE_ALIGN(size);
+
+	debug->buffer = kmalloc(size, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int size;
+	int rc = -ENOMEM;
+
+	if (!lpfc_debugfs_max_slow_ring_trc) {
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	size =  (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
+	size = PAGE_ALIGN(size);
+
+	debug->buffer = kmalloc(size, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the hbqinfo data */
+	debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer,
+		LPFC_HBQINFO_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the HBA SLIM data */
+	debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
+		LPFC_DUMPHBASLIM_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the Host SLIM data */
+	debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
+		LPFC_DUMPHOSTSLIM_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	if (!_dump_buf_data)
+		return -EBUSY;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
+			__func__, _dump_buf_data);
+	debug->buffer = _dump_buf_data;
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	if (!_dump_buf_dif)
+		return -EBUSY;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
+			__func__, _dump_buf_dif, file);
+	debug->buffer = _dump_buf_dif;
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
+		  size_t nbytes, loff_t *ppos)
+{
+	/*
+	 * The Data/DIF buffers only save one failing IO.
+	 * The write op is used as a reset mechanism after an IO has
+	 * already been saved, so the next one can be saved.
+	 */
+	spin_lock(&_dump_buf_lock);
+
+	memset((void *)_dump_buf_data, 0,
+			((1 << PAGE_SHIFT) << _dump_buf_data_order));
+	memset((void *)_dump_buf_dif, 0,
+			((1 << PAGE_SHIFT) << _dump_buf_dif_order));
+
+	_dump_buf_done = 0;
+
+	spin_unlock(&_dump_buf_lock);
+
+	return nbytes;
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
+	size_t nbytes, loff_t *ppos)
+{
+	struct dentry *dent = file->f_path.dentry;
+	struct lpfc_hba *phba = file->private_data;
+	char cbuf[32];
+	uint64_t tmp = 0;
+	int cnt = 0;
+
+	if (dent == phba->debug_writeGuard)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+	else if (dent == phba->debug_writeApp)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+	else if (dent == phba->debug_writeRef)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+	else if (dent == phba->debug_readGuard)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+	else if (dent == phba->debug_readApp)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+	else if (dent == phba->debug_readRef)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+	else if (dent == phba->debug_InjErrNPortID)
+		cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+	else if (dent == phba->debug_InjErrWWPN) {
+		memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+		tmp = cpu_to_be64(tmp);
+		cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+	} else if (dent == phba->debug_InjErrLBA) {
+		if (phba->lpfc_injerr_lba == (sector_t)(-1))
+			cnt = snprintf(cbuf, 32, "off\n");
+		else
+			cnt = snprintf(cbuf, 32, "0x%llx\n",
+				 (uint64_t) phba->lpfc_injerr_lba);
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			 "0547 Unknown debugfs error injection entry\n");
+
+	return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
+	size_t nbytes, loff_t *ppos)
+{
+	struct dentry *dent = file->f_path.dentry;
+	struct lpfc_hba *phba = file->private_data;
+	char dstbuf[33];
+	uint64_t tmp = 0;
+	int size;
+
+	memset(dstbuf, 0, 33);
+	size = (nbytes < 32) ? nbytes : 32;
+	if (copy_from_user(dstbuf, buf, size))
+		return -EFAULT;
+
+	if (dent == phba->debug_InjErrLBA) {
+		if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+		    (dstbuf[2] == 'f'))
+			tmp = (uint64_t)(-1);
+	}
+
+	if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
+		return 0;
+
+	if (dent == phba->debug_writeGuard)
+		phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_writeApp)
+		phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_writeRef)
+		phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readGuard)
+		phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readApp)
+		phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readRef)
+		phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_InjErrLBA)
+		phba->lpfc_injerr_lba = (sector_t)tmp;
+	else if (dent == phba->debug_InjErrNPortID)
+		phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+	else if (dent == phba->debug_InjErrWWPN) {
+		tmp = cpu_to_be64(tmp);
+		memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			 "0548 Unknown debugfs error injection entry\n");
+
+	return nbytes;
+}
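+
+/*
+ * Usage sketch (illustrative; the exact file names come from the debugfs
+ * setup code elsewhere in the driver): writing "off" to the InjErrLBA entry
+ * disables LBA-based injection, while any other value is parsed with
+ * kstrtoull(), so decimal and 0x-prefixed hex are both accepted, e.g.:
+ *
+ *	echo 0x1234 > InjErrLBA
+ *	echo off > InjErrLBA
+ */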
+
+static int
+lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/**
+ * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the nodelist data */
+	debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
+		LPFC_NODELIST_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @off: The offset to seek to or the amount to seek by.
+ * @whence: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation. The
+ * @whence parameter indicates whether @off is the offset to directly seek to,
+ * or if it is a value to seek forward or reverse by. This function figures out
+ * what the new offset of the debugfs file will be and assigns that value to the
+ * f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ **/
+static loff_t
+lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	struct lpfc_debug *debug = file->private_data;
+	return fixed_size_llseek(file, off, whence, debug->len);
+}
+
+/**
+ * lpfc_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from from the buffer indicated in the private_data
+ * field of @file. It will start reading at @ppos and copy up to @nbytes of
+ * data to @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_debugfs_read(struct file *file, char __user *buf,
+		  size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
+				       debug->len);
+}
+
+/**
+ * lpfc_debugfs_release - Release the buffer used to store debugfs file data
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file was
+ * opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	kfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+static int
+lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	debug->buffer = NULL;
+	kfree(debug);
+
+	return 0;
+}
+
+
+static int
+lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the nvmestat data */
+	debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer,
+		LPFC_NVMESTAT_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nvmet_tgtport *tgtp;
+	char mybuf[64];
+	char *pbuf;
+
+	if (!phba->targetport)
+		return -ENXIO;
+
+	if (nbytes >= sizeof(mybuf))
+		nbytes = sizeof(mybuf) - 1;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	if ((strncmp(pbuf, "reset", strlen("reset")) == 0) ||
+	    (strncmp(pbuf, "zero", strlen("zero")) == 0)) {
+		atomic_set(&tgtp->rcv_ls_req_in, 0);
+		atomic_set(&tgtp->rcv_ls_req_out, 0);
+		atomic_set(&tgtp->rcv_ls_req_drop, 0);
+		atomic_set(&tgtp->xmt_ls_abort, 0);
+		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
+		atomic_set(&tgtp->xmt_ls_rsp, 0);
+		atomic_set(&tgtp->xmt_ls_drop, 0);
+		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
+
+		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
+		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
+		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
+		atomic_set(&tgtp->xmt_fcp_drop, 0);
+		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
+		atomic_set(&tgtp->xmt_fcp_read, 0);
+		atomic_set(&tgtp->xmt_fcp_write, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp, 0);
+		atomic_set(&tgtp->xmt_fcp_release, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+
+		atomic_set(&tgtp->xmt_fcp_abort, 0);
+		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+		atomic_set(&tgtp->xmt_abort_sol, 0);
+		atomic_set(&tgtp->xmt_abort_unsol, 0);
+		atomic_set(&tgtp->xmt_abort_rsp, 0);
+		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
+	}
+	return nbytes;
+}
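+
+/*
+ * Usage sketch (illustrative; the debugfs entry name is assumed): writing
+ * "reset" or "zero" clears every NVMET targetport counter shown by the
+ * read side; the write fails with -ENXIO if no targetport is registered:
+ *
+ *	echo reset > nvmestat
+ */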
+
+static int
+lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the nvmektime data */
+	debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
+		LPFC_NVMEKTIME_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
+			     size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+	struct lpfc_hba   *phba = vport->phba;
+	char mybuf[64];
+	char *pbuf;
+
+	if (nbytes >= sizeof(mybuf))
+		nbytes = sizeof(mybuf) - 1;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+		phba->ktime_data_samples = 0;
+		phba->ktime_status_samples = 0;
+		phba->ktime_seg1_total = 0;
+		phba->ktime_seg1_max = 0;
+		phba->ktime_seg1_min = 0xffffffff;
+		phba->ktime_seg2_total = 0;
+		phba->ktime_seg2_max = 0;
+		phba->ktime_seg2_min = 0xffffffff;
+		phba->ktime_seg3_total = 0;
+		phba->ktime_seg3_max = 0;
+		phba->ktime_seg3_min = 0xffffffff;
+		phba->ktime_seg4_total = 0;
+		phba->ktime_seg4_max = 0;
+		phba->ktime_seg4_min = 0xffffffff;
+		phba->ktime_seg5_total = 0;
+		phba->ktime_seg5_max = 0;
+		phba->ktime_seg5_min = 0xffffffff;
+		phba->ktime_seg6_total = 0;
+		phba->ktime_seg6_max = 0;
+		phba->ktime_seg6_min = 0xffffffff;
+		phba->ktime_seg7_total = 0;
+		phba->ktime_seg7_max = 0;
+		phba->ktime_seg7_min = 0xffffffff;
+		phba->ktime_seg8_total = 0;
+		phba->ktime_seg8_max = 0;
+		phba->ktime_seg8_min = 0xffffffff;
+		phba->ktime_seg9_total = 0;
+		phba->ktime_seg9_max = 0;
+		phba->ktime_seg9_min = 0xffffffff;
+		phba->ktime_seg10_total = 0;
+		phba->ktime_seg10_max = 0;
+		phba->ktime_seg10_min = 0xffffffff;
+
+		phba->ktime_on = 1;
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "off",
+		   sizeof("off") - 1) == 0)) {
+		phba->ktime_on = 0;
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "zero",
+		   sizeof("zero") - 1) == 0)) {
+		phba->ktime_data_samples = 0;
+		phba->ktime_status_samples = 0;
+		phba->ktime_seg1_total = 0;
+		phba->ktime_seg1_max = 0;
+		phba->ktime_seg1_min = 0xffffffff;
+		phba->ktime_seg2_total = 0;
+		phba->ktime_seg2_max = 0;
+		phba->ktime_seg2_min = 0xffffffff;
+		phba->ktime_seg3_total = 0;
+		phba->ktime_seg3_max = 0;
+		phba->ktime_seg3_min = 0xffffffff;
+		phba->ktime_seg4_total = 0;
+		phba->ktime_seg4_max = 0;
+		phba->ktime_seg4_min = 0xffffffff;
+		phba->ktime_seg5_total = 0;
+		phba->ktime_seg5_max = 0;
+		phba->ktime_seg5_min = 0xffffffff;
+		phba->ktime_seg6_total = 0;
+		phba->ktime_seg6_max = 0;
+		phba->ktime_seg6_min = 0xffffffff;
+		phba->ktime_seg7_total = 0;
+		phba->ktime_seg7_max = 0;
+		phba->ktime_seg7_min = 0xffffffff;
+		phba->ktime_seg8_total = 0;
+		phba->ktime_seg8_max = 0;
+		phba->ktime_seg8_min = 0xffffffff;
+		phba->ktime_seg9_total = 0;
+		phba->ktime_seg9_max = 0;
+		phba->ktime_seg9_min = 0xffffffff;
+		phba->ktime_seg10_total = 0;
+		phba->ktime_seg10_max = 0;
+		phba->ktime_seg10_min = 0xffffffff;
+		return strlen(pbuf);
+	}
+	return -EINVAL;
+}
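+
+/*
+ * Usage sketch (illustrative; the debugfs entry name is assumed): the ktime
+ * file accepts three keywords. "on" zeroes all segment counters and enables
+ * sampling; "off" disables sampling; "zero" clears the counters without
+ * touching ktime_on:
+ *
+ *	echo on > nvmektime
+ *	(run IO)
+ *	cat nvmektime
+ *	echo off > nvmektime
+ */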
+
+static int
+lpfc_debugfs_nvmeio_trc_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the nvmeio trace data */
+	debug->buffer = kmalloc(LPFC_NVMEIO_TRC_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_nvmeio_trc_data(phba, debug->buffer,
+		LPFC_NVMEIO_TRC_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
+			      size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	int i;
+	unsigned long sz;
+	char mybuf[64];
+	char *pbuf;
+
+	if (nbytes >= sizeof(mybuf))
+		nbytes = sizeof(mybuf) - 1;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0570 nvmeio_trc_off\n");
+		phba->nvmeio_trc_output_idx = 0;
+		phba->nvmeio_trc_on = 0;
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0571 nvmeio_trc_on\n");
+		phba->nvmeio_trc_output_idx = 0;
+		phba->nvmeio_trc_on = 1;
+		return strlen(pbuf);
+	}
+
+	/* We must be off to allocate the trace buffer */
+	if (phba->nvmeio_trc_on != 0)
+		return -EINVAL;
+
+	/* If not on or off, the parameter is the trace buffer size */
+	i = kstrtoul(pbuf, 0, &sz);
+	if (i)
+		return -EINVAL;
+	phba->nvmeio_trc_size = (uint32_t)sz;
+
+	/* It must be a power of 2 - round down */
+	i = 0;
+	while (sz > 1) {
+		sz = sz >> 1;
+		i++;
+	}
+	sz = (1 << i);
+	if (phba->nvmeio_trc_size != sz)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0572 nvmeio_trc_size changed to %ld\n",
+				sz);
+	phba->nvmeio_trc_size = (uint32_t)sz;
+
+	/* If one previously exists, free it */
+	kfree(phba->nvmeio_trc);
+
+	/* Allocate new trace buffer and initialize */
+	phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
+				    sz), GFP_KERNEL);
+	if (!phba->nvmeio_trc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0573 Cannot create debugfs "
+				"nvmeio_trc buffer\n");
+		return -ENOMEM;
+	}
+	memset(phba->nvmeio_trc, 0,
+	       (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz));
+	atomic_set(&phba->nvmeio_trc_cnt, 0);
+	phba->nvmeio_trc_on = 0;
+	phba->nvmeio_trc_output_idx = 0;
+
+	return strlen(pbuf);
+}
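+
+/*
+ * Usage sketch (illustrative; the debugfs entry name is assumed): tracing
+ * must be off before the buffer can be resized, and a requested size is
+ * rounded down to a power of 2 (a request of 1000 entries, for example,
+ * becomes 512):
+ *
+ *	echo off > nvmeio_trc
+ *	echo 1024 > nvmeio_trc
+ *	echo on > nvmeio_trc
+ */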
+
+static int
+lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate buffer to hold the cpucheck data */
+	debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
+		LPFC_CPUCHECK_SIZE);
+
+	debug->i_private = inode->i_private;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+			    size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+	struct lpfc_hba   *phba = vport->phba;
+	char mybuf[64];
+	char *pbuf;
+	int i;
+
+	if (nbytes >= sizeof(mybuf))
+		nbytes = sizeof(mybuf) - 1;
+
+	memset(mybuf, 0, sizeof(mybuf));
+
+	if (copy_from_user(mybuf, buf, nbytes))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+
+	if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+		if (phba->nvmet_support)
+			phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+		else
+			phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "rcv",
+		   sizeof("rcv") - 1) == 0)) {
+		if (phba->nvmet_support)
+			phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
+		else
+			return -EINVAL;
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "off",
+		   sizeof("off") - 1) == 0)) {
+		phba->cpucheck_on = LPFC_CHECK_OFF;
+		return strlen(pbuf);
+	} else if ((strncmp(pbuf, "zero",
+		   sizeof("zero") - 1) == 0)) {
+		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+			if (i >= LPFC_CHECK_CPU_CNT)
+				break;
+			phba->cpucheck_rcv_io[i] = 0;
+			phba->cpucheck_xmt_io[i] = 0;
+			phba->cpucheck_cmpl_io[i] = 0;
+			phba->cpucheck_ccmpl_io[i] = 0;
+		}
+		return strlen(pbuf);
+	}
+	return -EINVAL;
+}
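+
+/*
+ * Usage sketch (illustrative; the debugfs entry name is assumed): "on"
+ * enables per-CPU IO counting (NVMET_IO on a target, NVME_IO on an
+ * initiator), "rcv" additionally enables receive counting on a target only,
+ * "off" disables all counting, and "zero" clears the per-CPU counters:
+ *
+ *	echo on > cpucheck
+ *	echo zero > cpucheck
+ */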
+
+/*
+ * ---------------------------------
+ * iDiag debugfs file access methods
+ * ---------------------------------
+ *
+ * All access methods are through the proper SLI4 PCI function's debugfs
+ * iDiag directory:
+ *
+ *     /sys/kernel/debug/lpfc/fn<#>/iDiag
+ */
+
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from the debugfs user space buffer and parses the
+ * buffer for the idiag command and its arguments. The white space between
+ * the data items is used as the parsing separator.
+ *
+ * This routine returns the number of command arguments parsed when
+ * successful; it returns a proper error code back to user space in error
+ * conditions.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+			      struct lpfc_idiag_cmd *idiag_cmd)
+{
+	char mybuf[64];
+	char *pbuf, *step_str;
+	int i;
+	size_t bsize;
+
+	memset(mybuf, 0, sizeof(mybuf));
+	memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+	bsize = min(nbytes, (sizeof(mybuf)-1));
+
+	if (copy_from_user(mybuf, buf, bsize))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+	step_str = strsep(&pbuf, "\t ");
+
+	/* The opcode must be present */
+	if (!step_str)
+		return -EINVAL;
+
+	idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+	if (idiag_cmd->opcode == 0)
+		return -EINVAL;
+
+	for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+		step_str = strsep(&pbuf, "\t ");
+		if (!step_str)
+			return i;
+		idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+	}
+	return i;
+}
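+
+/*
+ * Parse sketch (illustrative): a user write of "1 0x80 4" yields
+ * idiag_cmd->opcode = 1, data[0] = 0x80 and data[1] = 4, and the routine
+ * returns 2, the number of arguments parsed.
+ */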
+
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, allocates
+ * the debug struct used by the idiag file operations, and saves it in the
+ * private_data field in @file. The data buffer itself is allocated lazily
+ * by the individual idiag read methods.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->i_private = inode->i_private;
+	debug->buffer = NULL;
+	file->private_data = debug;
+
+	return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation, it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	/* Free the buffers to the file operation */
+	kfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also resets the fields in the idiag command struct in the
+ * case of a command for a write operation.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	if (debug->op == LPFC_IDIAG_OP_WR) {
+		switch (idiag.cmd.opcode) {
+		case LPFC_IDIAG_CMD_PCICFG_WR:
+		case LPFC_IDIAG_CMD_PCICFG_ST:
+		case LPFC_IDIAG_CMD_PCICFG_CL:
+		case LPFC_IDIAG_CMD_QUEACC_WR:
+		case LPFC_IDIAG_CMD_QUEACC_ST:
+		case LPFC_IDIAG_CMD_QUEACC_CL:
+			memset(&idiag, 0, sizeof(idiag));
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Free the buffers to the file operation */
+	kfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies to user @buf. Depending on the PCI config space
+ * read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all
+ * registers from the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+	int where, count;
+	char *pbuffer;
+	struct pci_dev *pdev;
+	uint32_t u32val;
+	uint16_t u16val;
+	uint8_t u8val;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+		where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+		count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+	} else
+		return 0;
+
+	/* Read single PCI config space register */
+	switch (count) {
+	case SIZE_U8: /* byte (8 bits) */
+		pci_read_config_byte(pdev, where, &u8val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%03x: %02x\n", where, u8val);
+		break;
+	case SIZE_U16: /* word (16 bits) */
+		pci_read_config_word(pdev, where, &u16val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%03x: %04x\n", where, u16val);
+		break;
+	case SIZE_U32: /* double word (32 bits) */
+		pci_read_config_dword(pdev, where, &u32val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%03x: %08x\n", where, u32val);
+		break;
+	case LPFC_PCI_CFG_BROWSE: /* browse all */
+		goto pcicfg_browse;
+		break;
+	default:
+		/* illegal count */
+		len = 0;
+		break;
+	}
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+	/* Browse all PCI config space registers */
+	offset_label = idiag.offset.last_rd;
+	offset = offset_label;
+
+	/* Read PCI config space */
+	len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+			"%03x: ", offset_label);
+	while (index > 0) {
+		pci_read_config_dword(pdev, offset, &u32val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%08x ", u32val);
+		offset += sizeof(uint32_t);
+		if (offset >= LPFC_PCI_CFG_SIZE) {
+			len += snprintf(pbuffer+len,
+					LPFC_PCI_CFG_SIZE-len, "\n");
+			break;
+		}
+		index -= sizeof(uint32_t);
+		if (!index)
+			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+					"\n");
+		else if (!(index % (8 * sizeof(uint32_t)))) {
+			offset_label += (8 * sizeof(uint32_t));
+			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+					"\n%03x: ", offset_label);
+		}
+	}
+
+	/* Set up the offset for next portion of pci cfg read */
+	if (index == 0) {
+		idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+		if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+			idiag.offset.last_rd = 0;
+	} else
+		idiag.offset.last_rd = 0;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
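+
+/*
+ * Illustrative usage sketch, not driver code: the pciCfg debugfs file is
+ * driven by first writing an idiag command and then reading the file back.
+ * The exact token format is defined by lpfc_idiag_cmd_get(), and the
+ * debugfs path below is an assumption for a typical lpfc function
+ * directory:
+ *
+ *   # select a dword read at config space offset 0x00, then dump it
+ *   echo "<LPFC_IDIAG_CMD_PCICFG_RD> 0x00 4" > /sys/kernel/debug/lpfc/fn0/iDiag/pciCfg
+ *   cat /sys/kernel/debug/lpfc/fn0/iDiag/pciCfg
+ */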
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for PCI config space read or write command
+ * accordingly. In the case of a PCI config space read command, it sets up
+ * the command in the idiag command struct for the debugfs read operation.
+ * In the case of a PCI config space write operation, it executes the write
+ * operation into the PCI config space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to the
+ * user space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t where, value, count;
+	uint32_t u32val;
+	uint16_t u16val;
+	uint8_t u8val;
+	struct pci_dev *pdev;
+	int rc;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return -EFAULT;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+		/* Sanity check on PCI config read command line arguments */
+		if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
+			goto error_out;
+		/* Read command from PCI config space, set up command fields */
+		where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+		count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+		if (count == LPFC_PCI_CFG_BROWSE) {
+			if (where % sizeof(uint32_t))
+				goto error_out;
+			/* Starting offset to browse */
+			idiag.offset.last_rd = where;
+		} else if ((count != sizeof(uint8_t)) &&
+			   (count != sizeof(uint16_t)) &&
+			   (count != sizeof(uint32_t)))
+			goto error_out;
+		if (count == sizeof(uint8_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+				goto error_out;
+			if (where % sizeof(uint8_t))
+				goto error_out;
+		}
+		if (count == sizeof(uint16_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+				goto error_out;
+			if (where % sizeof(uint16_t))
+				goto error_out;
+		}
+		if (count == sizeof(uint32_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+				goto error_out;
+			if (where % sizeof(uint32_t))
+				goto error_out;
+		}
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+		/* Sanity check on PCI config write command line arguments */
+		if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
+			goto error_out;
+		/* Write command to PCI config space, read-modify-write */
+		where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+		count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+		value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
+		/* Sanity checks */
+		if ((count != sizeof(uint8_t)) &&
+		    (count != sizeof(uint16_t)) &&
+		    (count != sizeof(uint32_t)))
+			goto error_out;
+		if (count == sizeof(uint8_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+				goto error_out;
+			if (where % sizeof(uint8_t))
+				goto error_out;
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+				pci_write_config_byte(pdev, where,
+						      (uint8_t)value);
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+				rc = pci_read_config_byte(pdev, where, &u8val);
+				if (!rc) {
+					u8val |= (uint8_t)value;
+					pci_write_config_byte(pdev, where,
+							      u8val);
+				}
+			}
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+				rc = pci_read_config_byte(pdev, where, &u8val);
+				if (!rc) {
+					u8val &= (uint8_t)(~value);
+					pci_write_config_byte(pdev, where,
+							      u8val);
+				}
+			}
+		}
+		if (count == sizeof(uint16_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+				goto error_out;
+			if (where % sizeof(uint16_t))
+				goto error_out;
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+				pci_write_config_word(pdev, where,
+						      (uint16_t)value);
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+				rc = pci_read_config_word(pdev, where, &u16val);
+				if (!rc) {
+					u16val |= (uint16_t)value;
+					pci_write_config_word(pdev, where,
+							      u16val);
+				}
+			}
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+				rc = pci_read_config_word(pdev, where, &u16val);
+				if (!rc) {
+					u16val &= (uint16_t)(~value);
+					pci_write_config_word(pdev, where,
+							      u16val);
+				}
+			}
+		}
+		if (count == sizeof(uint32_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+				goto error_out;
+			if (where % sizeof(uint32_t))
+				goto error_out;
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+				pci_write_config_dword(pdev, where, value);
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+				rc = pci_read_config_dword(pdev, where,
+							   &u32val);
+				if (!rc) {
+					u32val |= value;
+					pci_write_config_dword(pdev, where,
+							       u32val);
+				}
+			}
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+				rc = pci_read_config_dword(pdev, where,
+							   &u32val);
+				if (!rc) {
+					u32val &= ~value;
+					pci_write_config_dword(pdev, where,
+							       u32val);
+				}
+			}
+		}
+	} else
+		/* All other opcodes are illegal for now */
+		goto error_out;
+
+	return nbytes;
+error_out:
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
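+
+/*
+ * Note on the ST/CL opcodes handled above: "set" and "clear" are
+ * read-modify-write operations on the selected config register, with the
+ * user-supplied value acting as a bit mask:
+ *
+ *   ST: reg |= value;     set every bit that is set in value
+ *   CL: reg &= ~value;    clear every bit that is set in value
+ */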
+
+/**
+ * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci bar memory mapped space
+ * according to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	int offset_label, offset, offset_run, len = 0, index;
+	int bar_num, acc_range, bar_size;
+	char *pbuffer;
+	void __iomem *mem_mapped_bar;
+	uint32_t if_type;
+	struct pci_dev *pdev;
+	uint32_t u32val;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+		bar_num   = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+		offset    = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+		acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+		bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+	} else
+		return 0;
+
+	if (acc_range == 0)
+		return 0;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (bar_num == IDIAG_BARACC_BAR_0)
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		else if (bar_num == IDIAG_BARACC_BAR_1)
+			mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+		else if (bar_num == IDIAG_BARACC_BAR_2)
+			mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+		else
+			return 0;
+	} else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		if (bar_num == IDIAG_BARACC_BAR_0)
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		else
+			return 0;
+	} else
+		return 0;
+
+	/* Read single PCI bar space register */
+	if (acc_range == SINGLE_WORD) {
+		offset_run = offset;
+		u32val = readl(mem_mapped_bar + offset_run);
+		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+				"%05x: %08x\n", offset_run, u32val);
+	} else
+		goto baracc_browse;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+baracc_browse:
+
+	/* Browse all PCI bar space registers */
+	offset_label = idiag.offset.last_rd;
+	offset_run = offset_label;
+
+	/* Read PCI bar memory mapped space */
+	len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+			"%05x: ", offset_label);
+	index = LPFC_PCI_BAR_RD_SIZE;
+	while (index > 0) {
+		u32val = readl(mem_mapped_bar + offset_run);
+		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+				"%08x ", u32val);
+		offset_run += sizeof(uint32_t);
+		if (acc_range == LPFC_PCI_BAR_BROWSE) {
+			if (offset_run >= bar_size) {
+				len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+				break;
+			}
+		} else {
+			if (offset_run >= offset +
+			    (acc_range * sizeof(uint32_t))) {
+				len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+				break;
+			}
+		}
+		index -= sizeof(uint32_t);
+		if (!index)
+			len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+		else if (!(index % (8 * sizeof(uint32_t)))) {
+			offset_label += (8 * sizeof(uint32_t));
+			len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len,
+					"\n%05x: ", offset_label);
+		}
+	}
+
+	/* Set up the offset for next portion of pci bar read */
+	if (index == 0) {
+		idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
+		if (acc_range == LPFC_PCI_BAR_BROWSE) {
+			if (idiag.offset.last_rd >= bar_size)
+				idiag.offset.last_rd = 0;
+		} else {
+			if (offset_run >= offset +
+			    (acc_range * sizeof(uint32_t)))
+				idiag.offset.last_rd = offset;
+		}
+	} else {
+		if (acc_range == LPFC_PCI_BAR_BROWSE)
+			idiag.offset.last_rd = 0;
+		else
+			idiag.offset.last_rd = offset;
+	}
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for PCI bar memory mapped space read or
+ * write command accordingly. In the case of a PCI bar memory mapped space
+ * read command, it sets up the command in the idiag command struct for
+ * the debugfs read operation. In the case of a PCI bar memory mapped space
+ * write operation, it executes the write operation into the PCI bar memory
+ * mapped space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to the
+ * user space.
+ */
+static ssize_t
+lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t bar_num, bar_size, offset, value, acc_range;
+	struct pci_dev *pdev;
+	void __iomem *mem_mapped_bar;
+	uint32_t if_type;
+	uint32_t u32val;
+	int rc;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return -EFAULT;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if ((bar_num != IDIAG_BARACC_BAR_0) &&
+		    (bar_num != IDIAG_BARACC_BAR_1) &&
+		    (bar_num != IDIAG_BARACC_BAR_2))
+			goto error_out;
+	} else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		if (bar_num != IDIAG_BARACC_BAR_0)
+			goto error_out;
+	} else
+		goto error_out;
+
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (bar_num == IDIAG_BARACC_BAR_0) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF0_BAR0_SIZE;
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		} else if (bar_num == IDIAG_BARACC_BAR_1) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF0_BAR1_SIZE;
+			mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+		} else if (bar_num == IDIAG_BARACC_BAR_2) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF0_BAR2_SIZE;
+			mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+		} else
+			goto error_out;
+	} else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		if (bar_num == IDIAG_BARACC_BAR_0) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF2_BAR0_SIZE;
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		} else
+			goto error_out;
+	} else
+		goto error_out;
+
+	offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+	if (offset % sizeof(uint32_t))
+		goto error_out;
+
+	bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+		/* Sanity check on PCI bar read command line arguments */
+		if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
+			goto error_out;
+		acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+		if (acc_range == LPFC_PCI_BAR_BROWSE) {
+			if (offset > bar_size - sizeof(uint32_t))
+				goto error_out;
+			/* Starting offset to browse */
+			idiag.offset.last_rd = offset;
+		} else if (acc_range > SINGLE_WORD) {
+			if (offset + acc_range * sizeof(uint32_t) > bar_size)
+				goto error_out;
+			/* Starting offset to browse */
+			idiag.offset.last_rd = offset;
+		} else if (acc_range != SINGLE_WORD)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+		/* Sanity check on PCI bar write command line arguments */
+		if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
+			goto error_out;
+		/* Write command to PCI bar space, read-modify-write */
+		acc_range = SINGLE_WORD;
+		value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
+			writel(value, mem_mapped_bar + offset);
+			readl(mem_mapped_bar + offset);
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
+			u32val = readl(mem_mapped_bar + offset);
+			u32val |= value;
+			writel(u32val, mem_mapped_bar + offset);
+			readl(mem_mapped_bar + offset);
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+			u32val = readl(mem_mapped_bar + offset);
+			u32val &= ~value;
+			writel(u32val, mem_mapped_bar + offset);
+			readl(mem_mapped_bar + offset);
+		}
+	} else
+		/* All other opcodes are illegal for now */
+		goto error_out;
+
+	return nbytes;
+error_out:
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
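+
+/*
+ * The readl() that follows each writel() above is the usual posted-write
+ * flush: reading back through the same BAR forces the preceding MMIO write
+ * out to the device before the write syscall returns.
+ */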
+
+static int
+__lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
+			char *pbuffer, int len)
+{
+	if (!qp)
+		return len;
+
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\t\t%s WQ info: ", wqtype);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n",
+			qp->assoc_qid, qp->q_cnt_1,
+			(unsigned long long)qp->q_cnt_4);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			qp->queue_id, qp->entry_count,
+			qp->entry_size, qp->host_index,
+			qp->hba_index, qp->entry_repost);
+	len +=  snprintf(pbuffer + len,
+			LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+	return len;
+}
+
+static int
+lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
+		int *len, int max_cnt, int cq_id)
+{
+	struct lpfc_queue *qp;
+	int qidx;
+
+	for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
+		qp = phba->sli4_hba.fcp_wq[qidx];
+		if (qp->assoc_qid != cq_id)
+			continue;
+		*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
+		if (*len >= max_cnt)
+			return 1;
+	}
+	for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+		qp = phba->sli4_hba.nvme_wq[qidx];
+		if (qp->assoc_qid != cq_id)
+			continue;
+		*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
+		if (*len >= max_cnt)
+			return 1;
+	}
+	return 0;
+}
+
+static int
+__lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
+			char *pbuffer, int len)
+{
+	if (!qp)
+		return len;
+
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\t%s CQ info: ", cqtype);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x "
+			"xabt:x%x wq:x%llx]\n",
+			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
+			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			qp->queue_id, qp->entry_count,
+			qp->entry_size, qp->host_index,
+			qp->hba_index, qp->entry_repost);
+
+	len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+
+	return len;
+}
+
+static int
+__lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
+			char *rqtype, char *pbuffer, int len)
+{
+	if (!qp || !datqp)
+		return len;
+
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\t\t%s RQ info: ", rqtype);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
+			"posted:x%x rcv:x%llx]\n",
+			qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
+			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
+			qp->queue_id, qp->entry_count, qp->entry_size,
+			qp->host_index, qp->hba_index, qp->entry_repost);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n",
+			datqp->queue_id, datqp->entry_count,
+			datqp->entry_size, datqp->host_index,
+			datqp->hba_index, datqp->entry_repost);
+	return len;
+}
+
+static int
+lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
+		int *len, int max_cnt, int eqidx, int eq_id)
+{
+	struct lpfc_queue *qp;
+	int qidx, rc;
+
+	for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
+		qp = phba->sli4_hba.fcp_cq[qidx];
+		if (qp->assoc_qid != eq_id)
+			continue;
+
+		*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+
+		/* Reset max counter */
+		qp->CQ_max_cqe = 0;
+
+		if (*len >= max_cnt)
+			return 1;
+
+		rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+				max_cnt, qp->queue_id);
+		if (rc)
+			return 1;
+	}
+
+	for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+		qp = phba->sli4_hba.nvme_cq[qidx];
+		if (qp->assoc_qid != eq_id)
+			continue;
+
+		*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
+
+		/* Reset max counter */
+		qp->CQ_max_cqe = 0;
+
+		if (*len >= max_cnt)
+			return 1;
+
+		rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
+				max_cnt, qp->queue_id);
+		if (rc)
+			return 1;
+	}
+
+	if (eqidx < phba->cfg_nvmet_mrq) {
+		/* NVMET CQset */
+		qp = phba->sli4_hba.nvmet_cqset[eqidx];
+		*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
+
+		/* Reset max counter */
+		qp->CQ_max_cqe = 0;
+
+		if (*len >= max_cnt)
+			return 1;
+
+		/* RQ header */
+		qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx];
+		*len = __lpfc_idiag_print_rqpair(qp,
+				phba->sli4_hba.nvmet_mrq_data[eqidx],
+				"NVMET MRQ", pbuffer, *len);
+
+		if (*len >= max_cnt)
+			return 1;
+	}
+
+	return 0;
+}
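+
+/*
+ * The helpers above walk the queue topology top-down: for a given EQ id,
+ * every FCP/NVME CQ associated with that EQ is printed and, for each such
+ * CQ, every WQ associated with it, so a single EQ read yields the whole
+ * EQ subtree (plus the NVMET CQset/MRQ pair when one exists at that index).
+ */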
+
+static int
+__lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
+			char *pbuffer, int len)
+{
+	if (!qp)
+		return len;
+
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
+			"bs:x%x proc:x%llx eqd %d]\n",
+			eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
+			(unsigned long long)qp->q_cnt_4, qp->q_mode);
+	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
+			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
+			qp->queue_id, qp->entry_count, qp->entry_size,
+			qp->host_index, qp->hba_index, qp->entry_repost);
+	len +=  snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba SLI4 PCI function queue information,
+ * and copies to user @buf.
+ * This routine only returns one EQ's worth of information. It remembers the
+ * last EQ read and jumps to the next EQ. Thus, subsequent calls to queInfo
+ * will retrieve all EQs allocated for the phba.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+			loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *pbuffer;
+	int max_cnt, rc, x, len = 0;
+	struct lpfc_queue *qp = NULL;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+	max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 256;
+
+	if (*ppos)
+		return 0;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Fast-path event queue */
+	if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
+
+		x = phba->lpfc_idiag_last_eq;
+		if (phba->cfg_fof && (x >= phba->io_channel_irqs)) {
+			phba->lpfc_idiag_last_eq = 0;
+			goto fof;
+		}
+		phba->lpfc_idiag_last_eq++;
+		if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
+			if (phba->cfg_fof == 0)
+				phba->lpfc_idiag_last_eq = 0;
+
+		len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+					"EQ %d out of %d HBA EQs\n",
+					x, phba->io_channel_irqs);
+
+		/* Fast-path EQ */
+		qp = phba->sli4_hba.hba_eq[x];
+		if (!qp)
+			goto out;
+
+		len = __lpfc_idiag_print_eq(qp, "HBA", pbuffer, len);
+
+		/* Reset max counter */
+		qp->EQ_max_eqe = 0;
+
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* will dump both fcp and nvme cqs/wqs for the eq */
+		rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len,
+			max_cnt, x, qp->queue_id);
+		if (rc)
+			goto too_big;
+
+		/* Only EQ 0 has slow path CQs configured */
+		if (x)
+			goto out;
+
+		/* Slow-path mailbox CQ */
+		qp = phba->sli4_hba.mbx_cq;
+		len = __lpfc_idiag_print_cq(qp, "MBX", pbuffer, len);
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* Slow-path MBOX MQ */
+		qp = phba->sli4_hba.mbx_wq;
+		len = __lpfc_idiag_print_wq(qp, "MBX", pbuffer, len);
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* Slow-path ELS response CQ */
+		qp = phba->sli4_hba.els_cq;
+		len = __lpfc_idiag_print_cq(qp, "ELS", pbuffer, len);
+		/* Reset max counter */
+		if (qp)
+			qp->CQ_max_cqe = 0;
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* Slow-path ELS WQ */
+		qp = phba->sli4_hba.els_wq;
+		len = __lpfc_idiag_print_wq(qp, "ELS", pbuffer, len);
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* Slow-path NVME LS response CQ */
+		qp = phba->sli4_hba.nvmels_cq;
+		len = __lpfc_idiag_print_cq(qp, "NVME LS",
+						pbuffer, len);
+		/* Reset max counter */
+		if (qp)
+			qp->CQ_max_cqe = 0;
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* Slow-path NVME LS WQ */
+		qp = phba->sli4_hba.nvmels_wq;
+		len = __lpfc_idiag_print_wq(qp, "NVME LS",
+						pbuffer, len);
+		if (len >= max_cnt)
+			goto too_big;
+
+		qp = phba->sli4_hba.hdr_rq;
+		len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+				"RQpair", pbuffer, len);
+		if (len >= max_cnt)
+			goto too_big;
+
+		goto out;
+	}
+
+fof:
+	if (phba->cfg_fof) {
+		/* FOF EQ */
+		qp = phba->sli4_hba.fof_eq;
+		len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len);
+
+		/* Reset max counter */
+		if (qp)
+			qp->EQ_max_eqe = 0;
+
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* OAS CQ */
+		qp = phba->sli4_hba.oas_cq;
+		len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len);
+		/* Reset max counter */
+		if (qp)
+			qp->CQ_max_cqe = 0;
+		if (len >= max_cnt)
+			goto too_big;
+
+		/* OAS WQ */
+		qp = phba->sli4_hba.oas_wq;
+		len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len);
+		if (len >= max_cnt)
+			goto too_big;
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+too_big:
+	len +=  snprintf(pbuffer + len,
+		LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n");
+out:
+	spin_unlock_irq(&phba->hbalock);
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
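+
+/*
+ * Illustrative usage sketch (the debugfs path is an assumption): because
+ * each read returns one EQ's worth of data and advances lpfc_idiag_last_eq,
+ * repeated reads walk every EQ allocated to the function:
+ *
+ *   for i in $(seq 1 <io_channel_irqs>); do
+ *           cat /sys/kernel/debug/lpfc/fn0/iDiag/queInfo
+ *   done
+ */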
+
+/**
+ * lpfc_idiag_que_param_check - queue access command parameter sanity check
+ * @q: The pointer to queue structure.
+ * @index: The index into a queue entry.
+ * @count: The number of queue entries to access.
+ *
+ * Description:
+ * The routine performs sanity check on device queue access method commands.
+ *
+ * Returns:
+ * This function returns -EINVAL when fails the sanity check, otherwise, it
+ * returns 0.
+ **/
+static int
+lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
+{
+	/* Only support single entry read or browsing */
+	if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE))
+		return -EINVAL;
+	if (index > q->entry_count - 1)
+		return -EINVAL;
+	return 0;
+}
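+
+/*
+ * Example of the check above: for a 256-entry queue, index 255 with count 1
+ * passes, while index 256, or any count other than 1 or LPFC_QUE_ACC_BROWSE,
+ * fails with -EINVAL.
+ */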
+
+/**
+ * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
+ * @pbuffer: The pointer to buffer to copy the read data into.
+ * @len: The length of data already copied into @pbuffer.
+ * @pque: The pointer to the queue to be read.
+ * @index: The index into the queue entry.
+ *
+ * Description:
+ * This routine reads out a single entry from the given queue's index location
+ * and copies it into the buffer provided.
+ *
+ * Returns:
+ * This function returns 0 when it fails; otherwise, it returns the length
+ * of the data read into the buffer provided.
+ **/
+static int
+lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
+			  uint32_t index)
+{
+	int offset, esize;
+	uint32_t *pentry;
+
+	if (!pbuffer || !pque)
+		return 0;
+
+	esize = pque->entry_size;
+	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+			"QE-INDEX[%04d]:\n", index);
+
+	offset = 0;
+	pentry = pque->qe[index].address;
+	while (esize > 0) {
+		len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+				"%08x ", *pentry);
+		pentry++;
+		offset += sizeof(uint32_t);
+		esize -= sizeof(uint32_t);
+		if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
+			len += snprintf(pbuffer+len,
+					LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+	}
+	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+
+	return len;
+}
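+
+/*
+ * The entry is dumped as consecutive 32-bit words, four words per output
+ * line; a 64-byte WQE therefore prints as four lines of four 8-digit hex
+ * words beneath its "QE-INDEX[...]" header.
+ */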
+
+/**
+ * lpfc_idiag_queacc_read - idiag debugfs read port queue
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba device queue memory according to the
+ * idiag command, and copies to user @buf. Depending on the queue dump read
+ * command setup, it does either a single queue entry read or browses through
+ * all entries of the queue.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	uint32_t last_index, index, count;
+	struct lpfc_queue *pque = NULL;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+		index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+		count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+		pque = (struct lpfc_queue *)idiag.ptr_private;
+	} else
+		return 0;
+
+	/* Browse the queue starting from index */
+	if (count == LPFC_QUE_ACC_BROWSE)
+		goto que_browse;
+
+	/* Read a single entry from the queue */
+	len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+que_browse:
+
+	/* Browse all entries from the queue */
+	last_index = idiag.offset.last_rd;
+	index = last_index;
+
+	while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) {
+		len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
+		index++;
+		if (index > pque->entry_count - 1)
+			break;
+	}
+
+	/* Set up the offset for next portion of the queue read */
+	if (index > pque->entry_count - 1)
+		index = 0;
+	idiag.offset.last_rd = index;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for port queue read (dump) or write (set)
+ * command accordingly. In the case of a port queue read command, it sets up
+ * the command in the idiag command struct for the following debugfs read
+ * operation. In the case of a port queue write operation, it executes the
+ * write operation into the port queue entry accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t qidx, quetp, queid, index, count, offset, value;
+	uint32_t *pentry;
+	struct lpfc_queue *pque, *qp;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Get and sanity check the command fields */
+	quetp  = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
+	queid  = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
+	index  = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+	count  = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+	offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
+	value  = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
+
+	/* Sanity check on command line arguments */
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
+		if (rc != LPFC_QUE_ACC_WR_CMD_ARG)
+			goto error_out;
+		if (count != 1)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+		if (rc != LPFC_QUE_ACC_RD_CMD_ARG)
+			goto error_out;
+	} else
+		goto error_out;
+
+	switch (quetp) {
+	case LPFC_IDIAG_EQ:
+		/* HBA event queue */
+		if (phba->sli4_hba.hba_eq) {
+			for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
+				qp = phba->sli4_hba.hba_eq[qidx];
+				if (qp && qp->queue_id == queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(qp,
+						index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private = qp;
+					goto pass_check;
+				}
+			}
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_CQ:
+		/* MBX complete queue */
+		if (phba->sli4_hba.mbx_cq &&
+		    phba->sli4_hba.mbx_cq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.mbx_cq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.mbx_cq;
+			goto pass_check;
+		}
+		/* ELS complete queue */
+		if (phba->sli4_hba.els_cq &&
+		    phba->sli4_hba.els_cq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.els_cq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.els_cq;
+			goto pass_check;
+		}
+		/* NVME LS complete queue */
+		if (phba->sli4_hba.nvmels_cq &&
+		    phba->sli4_hba.nvmels_cq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.nvmels_cq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.nvmels_cq;
+			goto pass_check;
+		}
+		/* FCP complete queue */
+		if (phba->sli4_hba.fcp_cq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+								qidx++) {
+				qp = phba->sli4_hba.fcp_cq[qidx];
+				if (qp && qp->queue_id == queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						qp, index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private = qp;
+					goto pass_check;
+				}
+			}
+		}
+		/* NVME complete queue */
+		if (phba->sli4_hba.nvme_cq) {
+			qidx = 0;
+			do {
+				if (phba->sli4_hba.nvme_cq[qidx] &&
+				    phba->sli4_hba.nvme_cq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						phba->sli4_hba.nvme_cq[qidx],
+						index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.nvme_cq[qidx];
+					goto pass_check;
+				}
+			} while (++qidx < phba->cfg_nvme_io_channel);
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_MQ:
+		/* MBX work queue */
+		if (phba->sli4_hba.mbx_wq &&
+		    phba->sli4_hba.mbx_wq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.mbx_wq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.mbx_wq;
+			goto pass_check;
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_WQ:
+		/* ELS work queue */
+		if (phba->sli4_hba.els_wq &&
+		    phba->sli4_hba.els_wq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.els_wq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.els_wq;
+			goto pass_check;
+		}
+		/* NVME LS work queue */
+		if (phba->sli4_hba.nvmels_wq &&
+		    phba->sli4_hba.nvmels_wq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.nvmels_wq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.nvmels_wq;
+			goto pass_check;
+		}
+		/* FCP work queue */
+		if (phba->sli4_hba.fcp_wq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+								qidx++) {
+				qp = phba->sli4_hba.fcp_wq[qidx];
+				if (qp && qp->queue_id == queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						qp, index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private = qp;
+					goto pass_check;
+				}
+			}
+		}
+		/* NVME work queue */
+		if (phba->sli4_hba.nvme_wq) {
+			for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
+								qidx++) {
+				qp = phba->sli4_hba.nvme_wq[qidx];
+				if (qp && qp->queue_id == queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						qp, index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private = qp;
+					goto pass_check;
+				}
+			}
+		}
+
+		/* NVME work queues */
+		if (phba->sli4_hba.nvme_wq) {
+			for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
+				qidx++) {
+				if (!phba->sli4_hba.nvme_wq[qidx])
+					continue;
+				if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						phba->sli4_hba.nvme_wq[qidx],
+						index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.nvme_wq[qidx];
+					goto pass_check;
+				}
+			}
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_RQ:
+		/* HDR queue */
+		if (phba->sli4_hba.hdr_rq &&
+		    phba->sli4_hba.hdr_rq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.hdr_rq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.hdr_rq;
+			goto pass_check;
+		}
+		/* DAT queue */
+		if (phba->sli4_hba.dat_rq &&
+		    phba->sli4_hba.dat_rq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.dat_rq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.dat_rq;
+			goto pass_check;
+		}
+		goto error_out;
+		break;
+	default:
+		goto error_out;
+		break;
+	}
+
+pass_check:
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+		if (count == LPFC_QUE_ACC_BROWSE)
+			idiag.offset.last_rd = index;
+	}
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
+		/* Additional sanity checks on write operation */
+		pque = (struct lpfc_queue *)idiag.ptr_private;
+		if (offset > pque->entry_size/sizeof(uint32_t) - 1)
+			goto error_out;
+		pentry = pque->qe[index].address;
+		pentry += offset;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
+			*pentry = value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST)
+			*pentry |= value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL)
+			*pentry &= ~value;
+	}
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
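+
+/*
+ * Illustrative usage sketch (path and token format are assumptions; see
+ * lpfc_idiag_cmd_get() for the actual parser): to dump a single entry, a
+ * queacc read command names the queue type, queue id, entry index and a
+ * count of 1, after which the file is read back:
+ *
+ *   echo "<LPFC_IDIAG_CMD_QUEACC_RD> <quetp> <queid> 0 1" > .../iDiag/queAcc
+ *   cat .../iDiag/queAcc
+ */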
+
+/**
+ * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The length of data already copied into @pbuffer.
+ * @drbregid: The id of the doorbell register to read.
+ *
+ * Description:
+ * This routine reads a doorbell register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+			   int len, uint32_t drbregid)
+{
+
+	if (!pbuffer)
+		return 0;
+
+	switch (drbregid) {
+	case LPFC_DRB_EQCQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"EQCQ-DRB-REG: 0x%08x\n",
+				readl(phba->sli4_hba.EQCQDBregaddr));
+		break;
+	case LPFC_DRB_MQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"MQ-DRB-REG:   0x%08x\n",
+				readl(phba->sli4_hba.MQDBregaddr));
+		break;
+	case LPFC_DRB_WQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"WQ-DRB-REG:   0x%08x\n",
+				readl(phba->sli4_hba.WQDBregaddr));
+		break;
+	case LPFC_DRB_RQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"RQ-DRB-REG:   0x%08x\n",
+				readl(phba->sli4_hba.RQDBregaddr));
+		break;
+	default:
+		break;
+	}
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba device doorbell register according
+ * to the idiag command, and copies to user @buf. Depending on the doorbell
+ * register read command setup, it does either a single doorbell register
+ * read or dump all doorbell registers.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t drb_reg_id, i;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
+		drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+	else
+		return 0;
+
+	if (drb_reg_id == LPFC_DRB_ACC_ALL)
+		for (i = 1; i <= LPFC_DRB_MAX; i++)
+			len = lpfc_idiag_drbacc_read_reg(phba,
+							 pbuffer, len, i);
+	else
+		len = lpfc_idiag_drbacc_read_reg(phba,
+						 pbuffer, len, drb_reg_id);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
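+
+/*
+ * Passing LPFC_DRB_ACC_ALL as the register id makes the loop above walk ids
+ * 1..LPFC_DRB_MAX, so one read dumps the EQCQ, MQ, WQ and RQ doorbells into
+ * a single buffer.
+ */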
+
+/**
+ * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for port doorbell register read (dump) or
+ * write (set) command accordingly. In the case of a port doorbell register
+ * read command, it sets up the command in the idiag command struct for the
+ * following debugfs read operation. In the case of a port doorbell register
+ * write operation, it executes the write operation into the port doorbell
+ * register accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t drb_reg_id, value, reg_val = 0;
+	void __iomem *drb_reg;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Sanity check on command line arguments */
+	drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+	value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+		if (rc != LPFC_DRB_ACC_WR_CMD_ARG)
+			goto error_out;
+		if (drb_reg_id > LPFC_DRB_MAX)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) {
+		if (rc != LPFC_DRB_ACC_RD_CMD_ARG)
+			goto error_out;
+		if ((drb_reg_id > LPFC_DRB_MAX) &&
+		    (drb_reg_id != LPFC_DRB_ACC_ALL))
+			goto error_out;
+	} else
+		goto error_out;
+
+	/* Perform the write access operation */
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+		switch (drb_reg_id) {
+		case LPFC_DRB_EQCQ:
+			drb_reg = phba->sli4_hba.EQCQDBregaddr;
+			break;
+		case LPFC_DRB_MQ:
+			drb_reg = phba->sli4_hba.MQDBregaddr;
+			break;
+		case LPFC_DRB_WQ:
+			drb_reg = phba->sli4_hba.WQDBregaddr;
+			break;
+		case LPFC_DRB_RQ:
+			drb_reg = phba->sli4_hba.RQDBregaddr;
+			break;
+		default:
+			goto error_out;
+		}
+
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR)
+			reg_val = value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) {
+			reg_val = readl(drb_reg);
+			reg_val |= value;
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+			reg_val = readl(drb_reg);
+			reg_val &= ~value;
+		}
+		writel(reg_val, drb_reg);
+		readl(drb_reg); /* flush */
+	}
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control register
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The length of data already copied into @pbuffer.
+ * @ctlregid: The id of the control register to read.
+ *
+ * Description:
+ * This routine reads a control register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+			   int len, uint32_t ctlregid)
+{
+
+	if (!pbuffer)
+		return 0;
+
+	switch (ctlregid) {
+	case LPFC_CTL_PORT_SEM:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port SemReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_SEM_OFFSET));
+		break;
+	case LPFC_CTL_PORT_STA:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port StaReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_STA_OFFSET));
+		break;
+	case LPFC_CTL_PORT_CTL:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port CtlReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_CTL_OFFSET));
+		break;
+	case LPFC_CTL_PORT_ER1:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port Er1Reg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_ER1_OFFSET));
+		break;
+	case LPFC_CTL_PORT_ER2:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port Er2Reg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_ER2_OFFSET));
+		break;
+	case LPFC_CTL_PDEV_CTL:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"PDev CtlReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PDEV_CTL_OFFSET));
+		break;
+	default:
+		break;
+	}
+	return len;
+}
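+
+/*
+ * All control registers above live at fixed offsets within the SLI4
+ * configuration BAR (conf_regs_memmap_p), so each dump is one readl() at
+ * base + LPFC_CTL_*_OFFSET.
+ */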
+
+/**
+ * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba port and device registers according
+ * to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t ctl_reg_id, i;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
+		ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+	else
+		return 0;
+
+	if (ctl_reg_id == LPFC_CTL_ACC_ALL)
+		for (i = 1; i <= LPFC_CTL_MAX; i++)
+			len = lpfc_idiag_ctlacc_read_reg(phba,
+							 pbuffer, len, i);
+	else
+		len = lpfc_idiag_ctlacc_read_reg(phba,
+						 pbuffer, len, ctl_reg_id);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for port and device control register read
+ * (dump) or write (set) command accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t ctl_reg_id, value, reg_val = 0;
+	void __iomem *ctl_reg;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Sanity check on command line arguments */
+	ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+	value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+		if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
+			goto error_out;
+		if (ctl_reg_id > LPFC_CTL_MAX)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
+		if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
+			goto error_out;
+		if ((ctl_reg_id > LPFC_CTL_MAX) &&
+		    (ctl_reg_id != LPFC_CTL_ACC_ALL))
+			goto error_out;
+	} else
+		goto error_out;
+
+	/* Perform the write access operation */
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+		switch (ctl_reg_id) {
+		case LPFC_CTL_PORT_SEM:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_SEM_OFFSET;
+			break;
+		case LPFC_CTL_PORT_STA:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_STA_OFFSET;
+			break;
+		case LPFC_CTL_PORT_CTL:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_CTL_OFFSET;
+			break;
+		case LPFC_CTL_PORT_ER1:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_ER1_OFFSET;
+			break;
+		case LPFC_CTL_PORT_ER2:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_ER2_OFFSET;
+			break;
+		case LPFC_CTL_PDEV_CTL:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PDEV_CTL_OFFSET;
+			break;
+		default:
+			goto error_out;
+		}
+
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
+			reg_val = value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
+			reg_val = readl(ctl_reg);
+			reg_val |= value;
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+			reg_val = readl(ctl_reg);
+			reg_val &= ~value;
+		}
+		writel(reg_val, ctl_reg);
+		readl(ctl_reg); /* flush */
+	}
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
+ * @phba: Pointer to HBA context object.
+ * @pbuffer: Pointer to data buffer.
+ *
+ * Description:
+ * This routine gets the driver mailbox access debugfs setup information.
+ *
+ * Returns:
+ * This function returns the length of the data copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
+{
+	uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+	int len = 0;
+
+	mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_dump_map: 0x%08x\n", mbx_dump_map);
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_dump_cnt: %04d\n", mbx_dump_cnt);
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_word_cnt: %04d\n", mbx_word_cnt);
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba driver mailbox access debugfs setup
+ * information.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
+	    (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
+		return 0;
+
+	len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for the driver mailbox command (dump) and
+ * sets up the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Sanity check on command line arguments */
+	mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
+		if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
+			goto error_out;
+		if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
+		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
+			goto error_out;
+		if (mbx_word_cnt > sizeof(MAILBOX_t))
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
+		if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
+			goto error_out;
+		if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
+		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
+			goto error_out;
+		if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
+			goto error_out;
+		if (mbx_mbox_cmd != 0x9b)
+			goto error_out;
+	} else
+		goto error_out;
+
+	if (mbx_word_cnt == 0)
+		goto error_out;
+	if (rc != LPFC_MBX_DMP_ARG)
+		goto error_out;
+	if (mbx_mbox_cmd & ~0xff)
+		goto error_out;
+
+	/* condition for stop mailbox dump */
+	if (mbx_dump_cnt == 0)
+		goto reset_out;
+
+	return nbytes;
+
+reset_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
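+
+/*
+ * Illustrative usage of the mbxAcc interface (a sketch, assuming debugfs is
+ * mounted at /sys/kernel/debug and that lpfc_idiag_cmd_get() parses a numeric
+ * opcode followed by numeric data words; the command and values below are
+ * examples only):
+ *
+ *   # arm a dump of mailbox command 0x5a, by word and byte (map 0x3),
+ *   # for one issue (cnt 1) and 32 words; 0x41 is LPFC_IDIAG_CMD_MBXACC_DP:
+ *   echo "0x41 0x5a 0x3 1 32" > /sys/kernel/debug/lpfc/fn0/iDiag/mbxAcc
+ *
+ *   # read back the current dump setup:
+ *   cat /sys/kernel/debug/lpfc/fn0/iDiag/mbxAcc
+ */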
+
+/**
+ * lpfc_idiag_extacc_avail_get - get the available extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: Length of the data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the available extent information.
+ *
+ * Returns:
+ * Overall length of the data written into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+	uint16_t ext_cnt, ext_size;
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\nAvailable Extents Information:\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available VPI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available VFI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available RPI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available XRI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_extacc_alloc_get - get the allocated extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: Length of the data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the allocated extent information.
+ *
+ * Returns:
+ * Overall length of the data written into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+	uint16_t ext_cnt, ext_size;
+	int rc;
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\nAllocated Extents Information:\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated VPI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated VFI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated RPI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated XRI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_extacc_drivr_get - get driver extent information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: Length of the data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the driver extent information.
+ *
+ * Returns:
+ * Overall length of the data written into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+	struct lpfc_rsrc_blks *rsrc_blks;
+	int index;
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\nDriver Extents Information:\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tVPI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tVFI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
+			    list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tRPI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
+			    list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tXRI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
+			    list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space, then
+ * performs the syntax check for the extent information access commands and
+ * sets up the necessary state in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to user
+ * space.
+ **/
+static ssize_t
+lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	uint32_t ext_map;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+
+	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+		goto error_out;
+	if (rc != LPFC_EXT_ACC_CMD_ARG)
+		goto error_out;
+	if (!(ext_map & LPFC_EXT_ACC_ALL))
+		goto error_out;
+
+	return nbytes;
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
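+
+/*
+ * Illustrative usage of the extAcc interface (a sketch; 0x51 is
+ * LPFC_IDIAG_CMD_EXTACC_RD and map 0x7 is LPFC_EXT_ACC_ALL, i.e. available,
+ * allocated and driver extent information):
+ *
+ *   echo "0x51 0x7" > /sys/kernel/debug/lpfc/fn0/iDiag/extAcc
+ *   cat /sys/kernel/debug/lpfc/fn0/iDiag/extAcc
+ */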
+
+/**
+ * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the proper extent information according to the idiag
+ * command and copies it to the user buffer @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *pbuffer;
+	uint32_t ext_map;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+	if (*ppos)
+		return 0;
+	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+		return 0;
+
+	ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+	if (ext_map & LPFC_EXT_ACC_AVAIL)
+		len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
+	if (ext_map & LPFC_EXT_ACC_ALLOC)
+		len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
+	if (ext_map & LPFC_EXT_ACC_DRIVR)
+		len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+#undef lpfc_debugfs_op_disc_trc
+static const struct file_operations lpfc_debugfs_op_disc_trc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_disc_trc_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nodelist
+static const struct file_operations lpfc_debugfs_op_nodelist = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_nodelist_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_hbqinfo
+static const struct file_operations lpfc_debugfs_op_hbqinfo = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_hbqinfo_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHBASlim
+static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpHBASlim_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHostSlim
+static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpHostSlim_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nvmestat
+static const struct file_operations lpfc_debugfs_op_nvmestat = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_nvmestat_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_nvmestat_write,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nvmektime
+static const struct file_operations lpfc_debugfs_op_nvmektime = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_nvmektime_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_nvmektime_write,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nvmeio_trc
+static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_nvmeio_trc_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_nvmeio_trc_write,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_cpucheck
+static const struct file_operations lpfc_debugfs_op_cpucheck = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_cpucheck_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_cpucheck_write,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpData
+static const struct file_operations lpfc_debugfs_op_dumpData = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpData_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_dumpDataDif_write,
+	.release =      lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dumpDif
+static const struct file_operations lpfc_debugfs_op_dumpDif = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpDif_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_dumpDataDif_write,
+	.release =      lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dif_err
+static const struct file_operations lpfc_debugfs_op_dif_err = {
+	.owner =	THIS_MODULE,
+	.open =		simple_open,
+	.llseek =	lpfc_debugfs_lseek,
+	.read =		lpfc_debugfs_dif_err_read,
+	.write =	lpfc_debugfs_dif_err_write,
+	.release =	lpfc_debugfs_dif_err_release,
+};
+
+#undef lpfc_debugfs_op_slow_ring_trc
+static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_slow_ring_trc_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+static struct dentry *lpfc_debugfs_root = NULL;
+static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_pcicfg_read,
+	.write =        lpfc_idiag_pcicfg_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_barAcc
+static const struct file_operations lpfc_idiag_op_barAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_baracc_read,
+	.write =        lpfc_idiag_baracc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.read =         lpfc_idiag_queinfo_read,
+	.release =      lpfc_idiag_release,
+};
+
+#undef lpfc_idiag_op_queAcc
+static const struct file_operations lpfc_idiag_op_queAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_queacc_read,
+	.write =        lpfc_idiag_queacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_drbAcc
+static const struct file_operations lpfc_idiag_op_drbAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_drbacc_read,
+	.write =        lpfc_idiag_drbacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_ctlAcc
+static const struct file_operations lpfc_idiag_op_ctlAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_ctlacc_read,
+	.write =        lpfc_idiag_ctlacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_mbxAcc
+static const struct file_operations lpfc_idiag_op_mbxAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_mbxacc_read,
+	.write =        lpfc_idiag_mbxacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_extAcc
+static const struct file_operations lpfc_idiag_op_extAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_extacc_read,
+	.write =        lpfc_idiag_extacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#endif
+
+/**
+ * lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
+ * @phba: Pointer to HBA context object.
+ * @nemb_tp: Non-embedded mailbox command type.
+ * @mbox_tp: Mailbox transfer type (mbox_rd or mbox_wr).
+ * @dma_tp: DMA buffer type (dma_mbox or dma_ebuf).
+ * @sta_tp: Dump state type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ * @ext_buf: External buffer count or sequence number.
+ *
+ * Description:
+ * This routine dumps a bsg pass-through non-embedded mailbox command with
+ * an external buffer.
+ **/
+void
+lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+				enum mbox_type mbox_tp, enum dma_type dma_tp,
+				enum sta_type sta_tp,
+				struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
+	char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+	int len = 0;
+	uint32_t do_dump = 0;
+	uint32_t *pword;
+	uint32_t i;
+
+	if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
+		return;
+
+	mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
+	    (*mbx_dump_cnt == 0) ||
+	    (*mbx_word_cnt == 0))
+		return;
+
+	if (*mbx_mbox_cmd != 0x9B)
+		return;
+
+	if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
+			do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
+			pr_err("\nRead mbox command (x%x), "
+			       "nemb:0x%x, extbuf_cnt:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+	if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
+			do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
+			pr_err("\nRead mbox buffer (x%x), "
+			       "nemb:0x%x, extbuf_seq:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+	if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
+			do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
+			pr_err("\nWrite mbox command (x%x), "
+			       "nemb:0x%x, extbuf_cnt:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+	if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
+			do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
+			pr_err("\nWrite mbox buffer (x%x), "
+			       "nemb:0x%x, extbuf_seq:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+
+	/* dump buffer content */
+	if (do_dump) {
+		pword = (uint32_t *)dmabuf->virt;
+		for (i = 0; i < *mbx_word_cnt; i++) {
+			if (!(i % 8)) {
+				if (i != 0)
+					pr_err("%s\n", line_buf);
+				len = 0;
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+					"%08x ", (uint32_t)*pword);
+			pword++;
+		}
+		if ((i - 1) % 8)
+			pr_err("%s\n", line_buf);
+		(*mbx_dump_cnt)--;
+	}
+
+	/* Clean out command structure on reaching dump count */
+	if (*mbx_dump_cnt == 0)
+		memset(&idiag, 0, sizeof(idiag));
+	return;
+#endif
+}
+
+/**
+ * lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to the mailbox command being issued.
+ *
+ * Description:
+ * This routine dumps a pass-through non-embedded mailbox command at issue
+ * mailbox command time.
+ **/
+void
+lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
+	char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+	int len = 0;
+	uint32_t *pword;
+	uint8_t *pbyte;
+	uint32_t i, j;
+
+	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
+		return;
+
+	mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
+	    (*mbx_dump_cnt == 0) ||
+	    (*mbx_word_cnt == 0))
+		return;
+
+	if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
+	    (*mbx_mbox_cmd != pmbox->mbxCommand))
+		return;
+
+	/* dump buffer content */
+	if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
+		pr_err("Mailbox command:0x%x dump by word:\n",
+		       pmbox->mbxCommand);
+		pword = (uint32_t *)pmbox;
+		for (i = 0; i < *mbx_word_cnt; i++) {
+			if (!(i % 8)) {
+				if (i != 0)
+					pr_err("%s\n", line_buf);
+				len = 0;
+				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+					"%08x ",
+					((uint32_t)*pword) & 0xffffffff);
+			pword++;
+		}
+		if ((i - 1) % 8)
+			pr_err("%s\n", line_buf);
+		pr_err("\n");
+	}
+	if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
+		pr_err("Mailbox command:0x%x dump by byte:\n",
+		       pmbox->mbxCommand);
+		pbyte = (uint8_t *)pmbox;
+		for (i = 0; i < *mbx_word_cnt; i++) {
+			if (!(i % 8)) {
+				if (i != 0)
+					pr_err("%s\n", line_buf);
+				len = 0;
+				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+			for (j = 0; j < 4; j++) {
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%02x",
+						((uint8_t)*pbyte) & 0xff);
+				pbyte++;
+			}
+			len += snprintf(line_buf+len,
+					LPFC_MBX_ACC_LBUF_SZ-len, " ");
+		}
+		if ((i - 1) % 8)
+			pr_err("%s\n", line_buf);
+		pr_err("\n");
+	}
+	(*mbx_dump_cnt)--;
+
+	/* Clean out command structure on reaching dump count */
+	if (*mbx_dump_cnt == 0)
+		memset(&idiag, 0, sizeof(idiag));
+	return;
+#endif
+}
+
+/**
+ * lpfc_debugfs_initialize - Initialize debugfs for a vport
+ * @vport: The vport pointer to initialize.
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the lpfc debugfs file
+ * system. If not already created, this routine will create the lpfc directory,
+ * the fnX directory (for this HBA PCI function), and the vportX directory for
+ * this vport. It will also create each file used to access lpfc specific
+ * debugfs information.
+ **/
+inline void
+lpfc_debugfs_initialize(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_hba   *phba = vport->phba;
+	char name[64];
+	uint32_t num, i;
+	bool pport_setup = false;
+
+	if (!lpfc_debugfs_enable)
+		return;
+
+	/* Setup lpfc root directory */
+	if (!lpfc_debugfs_root) {
+		lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
+		atomic_set(&lpfc_debugfs_hba_count, 0);
+		if (!lpfc_debugfs_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0408 Cannot create debugfs root\n");
+			goto debug_failed;
+		}
+	}
+	if (!lpfc_debugfs_start_time)
+		lpfc_debugfs_start_time = jiffies;
+
+	/* Setup funcX directory for specific HBA PCI function */
+	snprintf(name, sizeof(name), "fn%d", phba->brd_no);
+	if (!phba->hba_debugfs_root) {
+		pport_setup = true;
+		phba->hba_debugfs_root =
+			debugfs_create_dir(name, lpfc_debugfs_root);
+		if (!phba->hba_debugfs_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0412 Cannot create debugfs hba\n");
+			goto debug_failed;
+		}
+		atomic_inc(&lpfc_debugfs_hba_count);
+		atomic_set(&phba->debugfs_vport_count, 0);
+
+		/* Setup hbqinfo */
+		snprintf(name, sizeof(name), "hbqinfo");
+		phba->debug_hbqinfo =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_hbqinfo);
+		if (!phba->debug_hbqinfo) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0411 Cannot create debugfs hbqinfo\n");
+			goto debug_failed;
+		}
+
+		/* Setup dumpHBASlim */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			snprintf(name, sizeof(name), "dumpHBASlim");
+			phba->debug_dumpHBASlim =
+				debugfs_create_file(name,
+					S_IFREG|S_IRUGO|S_IWUSR,
+					phba->hba_debugfs_root,
+					phba, &lpfc_debugfs_op_dumpHBASlim);
+			if (!phba->debug_dumpHBASlim) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						 "0413 Cannot create debugfs "
+						"dumpHBASlim\n");
+				goto debug_failed;
+			}
+		} else
+			phba->debug_dumpHBASlim = NULL;
+
+		/* Setup dumpHostSlim */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			snprintf(name, sizeof(name), "dumpHostSlim");
+			phba->debug_dumpHostSlim =
+				debugfs_create_file(name,
+					S_IFREG|S_IRUGO|S_IWUSR,
+					phba->hba_debugfs_root,
+					phba, &lpfc_debugfs_op_dumpHostSlim);
+			if (!phba->debug_dumpHostSlim) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						 "0414 Cannot create debugfs "
+						 "dumpHostSlim\n");
+				goto debug_failed;
+			}
+		} else
+			phba->debug_dumpHostSlim = NULL;
+
+		/* Setup dumpData */
+		snprintf(name, sizeof(name), "dumpData");
+		phba->debug_dumpData =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_dumpData);
+		if (!phba->debug_dumpData) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0800 Cannot create debugfs dumpData\n");
+			goto debug_failed;
+		}
+
+		/* Setup dumpDif */
+		snprintf(name, sizeof(name), "dumpDif");
+		phba->debug_dumpDif =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_dumpDif);
+		if (!phba->debug_dumpDif) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0801 Cannot create debugfs dumpDif\n");
+			goto debug_failed;
+		}
+
+		/* Setup DIF Error Injections */
+		snprintf(name, sizeof(name), "InjErrLBA");
+		phba->debug_InjErrLBA =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrLBA) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0807 Cannot create debugfs InjErrLBA\n");
+			goto debug_failed;
+		}
+		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+		snprintf(name, sizeof(name), "InjErrNPortID");
+		phba->debug_InjErrNPortID =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrNPortID) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0809 Cannot create debugfs InjErrNPortID\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "InjErrWWPN");
+		phba->debug_InjErrWWPN =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrWWPN) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0810 Cannot create debugfs InjErrWWPN\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "writeGuardInjErr");
+		phba->debug_writeGuard =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_writeGuard) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0802 Cannot create debugfs writeGuard\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "writeAppInjErr");
+		phba->debug_writeApp =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_writeApp) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0803 Cannot create debugfs writeApp\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "writeRefInjErr");
+		phba->debug_writeRef =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_writeRef) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0804 Cannot create debugfs writeRef\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "readGuardInjErr");
+		phba->debug_readGuard =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readGuard) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0808 Cannot create debugfs readGuard\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "readAppInjErr");
+		phba->debug_readApp =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readApp) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0805 Cannot create debugfs readApp\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "readRefInjErr");
+		phba->debug_readRef =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readRef) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0806 Cannot create debugfs readApp\n");
+			goto debug_failed;
+		}
+
+		/* Setup slow ring trace */
+		if (lpfc_debugfs_max_slow_ring_trc) {
+			num = lpfc_debugfs_max_slow_ring_trc - 1;
+			if (num & lpfc_debugfs_max_slow_ring_trc) {
+				/* Change to be a power of 2 */
+				num = lpfc_debugfs_max_slow_ring_trc;
+				i = 0;
+				while (num > 1) {
+					num = num >> 1;
+					i++;
+				}
+				lpfc_debugfs_max_slow_ring_trc = (1 << i);
+				pr_err("lpfc_debugfs_max_disc_trc changed to "
+				       "%d\n", lpfc_debugfs_max_disc_trc);
+			}
+		}
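+		/*
+		 * Note: the open-coded loop above rounds the trace depth down
+		 * to a power of two. With <linux/log2.h> the same result could
+		 * arguably be written as (a sketch, not upstream code):
+		 *
+		 *   if (!is_power_of_2(lpfc_debugfs_max_slow_ring_trc))
+		 *       lpfc_debugfs_max_slow_ring_trc =
+		 *           rounddown_pow_of_two(lpfc_debugfs_max_slow_ring_trc);
+		 */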
+
+		snprintf(name, sizeof(name), "slow_ring_trace");
+		phba->debug_slow_ring_trc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_slow_ring_trc);
+		if (!phba->debug_slow_ring_trc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0415 Cannot create debugfs "
+					 "slow_ring_trace\n");
+			goto debug_failed;
+		}
+		if (!phba->slow_ring_trc) {
+			phba->slow_ring_trc = kmalloc(
+				(sizeof(struct lpfc_debugfs_trc) *
+				lpfc_debugfs_max_slow_ring_trc),
+				GFP_KERNEL);
+			if (!phba->slow_ring_trc) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						 "0416 Cannot create debugfs "
+						 "slow_ring buffer\n");
+				goto debug_failed;
+			}
+			atomic_set(&phba->slow_ring_trc_cnt, 0);
+			memset(phba->slow_ring_trc, 0,
+				(sizeof(struct lpfc_debugfs_trc) *
+				lpfc_debugfs_max_slow_ring_trc));
+		}
+
+		snprintf(name, sizeof(name), "nvmeio_trc");
+		phba->debug_nvmeio_trc =
+			debugfs_create_file(name, 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_debugfs_op_nvmeio_trc);
+		if (!phba->debug_nvmeio_trc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0574 No create debugfs nvmeio_trc\n");
+			goto debug_failed;
+		}
+
+		atomic_set(&phba->nvmeio_trc_cnt, 0);
+		if (lpfc_debugfs_max_nvmeio_trc) {
+			num = lpfc_debugfs_max_nvmeio_trc - 1;
+			if (num & lpfc_debugfs_max_disc_trc) {
+				/* Change to be a power of 2 */
+				num = lpfc_debugfs_max_nvmeio_trc;
+				i = 0;
+				while (num > 1) {
+					num = num >> 1;
+					i++;
+				}
+				lpfc_debugfs_max_nvmeio_trc = (1 << i);
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0575 lpfc_debugfs_max_nvmeio_trc "
+						"changed to %d\n",
+						lpfc_debugfs_max_nvmeio_trc);
+			}
+			phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc;
+
+			/* Allocate trace buffer and initialize */
+			phba->nvmeio_trc = kmalloc(
+				(sizeof(struct lpfc_debugfs_nvmeio_trc) *
+				phba->nvmeio_trc_size), GFP_KERNEL);
+
+			if (!phba->nvmeio_trc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0576 Cannot create debugfs "
+						"nvmeio_trc buffer\n");
+				goto nvmeio_off;
+			}
+			memset(phba->nvmeio_trc, 0,
+			       (sizeof(struct lpfc_debugfs_nvmeio_trc) *
+			       phba->nvmeio_trc_size));
+			phba->nvmeio_trc_on = 1;
+			phba->nvmeio_trc_output_idx = 0;
+		} else {
+nvmeio_off:
+			phba->nvmeio_trc_size = 0;
+			phba->nvmeio_trc_on = 0;
+			phba->nvmeio_trc_output_idx = 0;
+			phba->nvmeio_trc = NULL;
+		}
+	}
+
+	snprintf(name, sizeof(name), "vport%d", vport->vpi);
+	if (!vport->vport_debugfs_root) {
+		vport->vport_debugfs_root =
+			debugfs_create_dir(name, phba->hba_debugfs_root);
+		if (!vport->vport_debugfs_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0417 Can't create debugfs\n");
+			goto debug_failed;
+		}
+		atomic_inc(&phba->debugfs_vport_count);
+	}
+
+	if (lpfc_debugfs_max_disc_trc) {
+		num = lpfc_debugfs_max_disc_trc - 1;
+		if (num & lpfc_debugfs_max_disc_trc) {
+			/* Change to be a power of 2 */
+			num = lpfc_debugfs_max_disc_trc;
+			i = 0;
+			while (num > 1) {
+				num = num >> 1;
+				i++;
+			}
+			lpfc_debugfs_max_disc_trc = (1 << i);
+			pr_err("lpfc_debugfs_max_disc_trc changed to %d\n",
+			       lpfc_debugfs_max_disc_trc);
+		}
+	}
+
+	vport->disc_trc = kzalloc(
+		(sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc),
+		GFP_KERNEL);
+
+	if (!vport->disc_trc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0418 Cannot create debugfs disc trace "
+				 "buffer\n");
+		goto debug_failed;
+	}
+	atomic_set(&vport->disc_trc_cnt, 0);
+
+	snprintf(name, sizeof(name), "discovery_trace");
+	vport->debug_disc_trc =
+		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 vport->vport_debugfs_root,
+				 vport, &lpfc_debugfs_op_disc_trc);
+	if (!vport->debug_disc_trc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0419 Cannot create debugfs "
+				 "discovery_trace\n");
+		goto debug_failed;
+	}
+	snprintf(name, sizeof(name), "nodelist");
+	vport->debug_nodelist =
+		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 vport->vport_debugfs_root,
+				 vport, &lpfc_debugfs_op_nodelist);
+	if (!vport->debug_nodelist) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "2985 Can't create debugfs nodelist\n");
+		goto debug_failed;
+	}
+
+	snprintf(name, sizeof(name), "nvmestat");
+	vport->debug_nvmestat =
+		debugfs_create_file(name, 0644,
+				    vport->vport_debugfs_root,
+				    vport, &lpfc_debugfs_op_nvmestat);
+	if (!vport->debug_nvmestat) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0811 Cannot create debugfs nvmestat\n");
+		goto debug_failed;
+	}
+
+	snprintf(name, sizeof(name), "nvmektime");
+	vport->debug_nvmektime =
+		debugfs_create_file(name, 0644,
+				    vport->vport_debugfs_root,
+				    vport, &lpfc_debugfs_op_nvmektime);
+	if (!vport->debug_nvmektime) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0815 Cannot create debugfs nvmektime\n");
+		goto debug_failed;
+	}
+
+	snprintf(name, sizeof(name), "cpucheck");
+	vport->debug_cpucheck =
+		debugfs_create_file(name, 0644,
+				    vport->vport_debugfs_root,
+				    vport, &lpfc_debugfs_op_cpucheck);
+	if (!vport->debug_cpucheck) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0819 Cannot create debugfs cpucheck\n");
+		goto debug_failed;
+	}
+
+	/*
+	 * The following section is for additional directories/files for the
+	 * physical port.
+	 */
+
+	if (!pport_setup)
+		goto debug_failed;
+
+	/*
+	 * iDiag debugfs root entry points for SLI4 device only
+	 */
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		goto debug_failed;
+
+	snprintf(name, sizeof(name), "iDiag");
+	if (!phba->idiag_root) {
+		phba->idiag_root =
+			debugfs_create_dir(name, phba->hba_debugfs_root);
+		if (!phba->idiag_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2922 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+		/* Initialize iDiag data structure */
+		memset(&idiag, 0, sizeof(idiag));
+	}
+
+	/* iDiag read PCI config space */
+	snprintf(name, sizeof(name), "pciCfg");
+	if (!phba->idiag_pci_cfg) {
+		phba->idiag_pci_cfg =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+		if (!phba->idiag_pci_cfg) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2923 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+		idiag.offset.last_rd = 0;
+	}
+
+	/* iDiag PCI BAR access */
+	snprintf(name, sizeof(name), "barAcc");
+	if (!phba->idiag_bar_acc) {
+		phba->idiag_bar_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
+		if (!phba->idiag_bar_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					"3056 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+		idiag.offset.last_rd = 0;
+	}
+
+	/* iDiag get PCI function queue information */
+	snprintf(name, sizeof(name), "queInfo");
+	if (!phba->idiag_que_info) {
+		phba->idiag_que_info =
+			debugfs_create_file(name, S_IFREG|S_IRUGO,
+			phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+		if (!phba->idiag_que_info) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2924 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access PCI function queue */
+	snprintf(name, sizeof(name), "queAcc");
+	if (!phba->idiag_que_acc) {
+		phba->idiag_que_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
+		if (!phba->idiag_que_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2926 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access PCI function doorbell registers */
+	snprintf(name, sizeof(name), "drbAcc");
+	if (!phba->idiag_drb_acc) {
+		phba->idiag_drb_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
+		if (!phba->idiag_drb_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2927 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access PCI function control registers */
+	snprintf(name, sizeof(name), "ctlAcc");
+	if (!phba->idiag_ctl_acc) {
+		phba->idiag_ctl_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
+		if (!phba->idiag_ctl_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2981 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access mbox commands */
+	snprintf(name, sizeof(name), "mbxAcc");
+	if (!phba->idiag_mbx_acc) {
+		phba->idiag_mbx_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
+		if (!phba->idiag_mbx_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					"2980 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag extents access commands */
+	if (phba->sli4_hba.extents_in_use) {
+		snprintf(name, sizeof(name), "extAcc");
+		if (!phba->idiag_ext_acc) {
+			phba->idiag_ext_acc =
+				debugfs_create_file(name,
+						    S_IFREG|S_IRUGO|S_IWUSR,
+						    phba->idiag_root, phba,
+						    &lpfc_idiag_op_extAcc);
+			if (!phba->idiag_ext_acc) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						"2986 Cant create "
+						"idiag debugfs\n");
+				goto debug_failed;
+			}
+		}
+	}
+
+debug_failed:
+	return;
+#endif
+}
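+
+/*
+ * Resulting debugfs layout (illustrative, assuming debugfs is mounted at
+ * /sys/kernel/debug, one SLI4 PCI function fn0 and one vport vport0):
+ *
+ *   lpfc/fn0/          hbqinfo, dumpData, dumpDif, the *InjErr* files,
+ *                      slow_ring_trace, nvmeio_trc
+ *   lpfc/fn0/vport0/   discovery_trace, nodelist, nvmestat, nvmektime,
+ *                      cpucheck
+ *   lpfc/fn0/iDiag/    pciCfg, barAcc, queInfo, queAcc, drbAcc, ctlAcc,
+ *                      mbxAcc, extAcc (when extents are in use)
+ */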
+
+/**
+ * lpfc_debugfs_terminate -  Tear down debugfs infrastructure for this vport
+ * @vport: The vport pointer to remove from debugfs.
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system elements
+ * that are specific to this vport. It also checks to see if there are any
+ * users left for the debugfs directories associated with the HBA and driver. If
+ * this is the last user of the HBA directory or driver directory then it will
+ * remove those from the debugfs infrastructure as well.
+ **/
+inline void
+lpfc_debugfs_terminate(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_hba   *phba = vport->phba;
+
+	kfree(vport->disc_trc);
+	vport->disc_trc = NULL;
+
+	debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
+	vport->debug_disc_trc = NULL;
+
+	debugfs_remove(vport->debug_nodelist); /* nodelist */
+	vport->debug_nodelist = NULL;
+
+	debugfs_remove(vport->debug_nvmestat); /* nvmestat */
+	vport->debug_nvmestat = NULL;
+
+	debugfs_remove(vport->debug_nvmektime); /* nvmektime */
+	vport->debug_nvmektime = NULL;
+
+	debugfs_remove(vport->debug_cpucheck); /* cpucheck */
+	vport->debug_cpucheck = NULL;
+
+	if (vport->vport_debugfs_root) {
+		debugfs_remove(vport->vport_debugfs_root); /* vportX */
+		vport->vport_debugfs_root = NULL;
+		atomic_dec(&phba->debugfs_vport_count);
+	}
+
+	if (atomic_read(&phba->debugfs_vport_count) == 0) {
+
+		debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
+		phba->debug_hbqinfo = NULL;
+
+		debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
+		phba->debug_dumpHBASlim = NULL;
+
+		debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
+		phba->debug_dumpHostSlim = NULL;
+
+		debugfs_remove(phba->debug_dumpData); /* dumpData */
+		phba->debug_dumpData = NULL;
+
+		debugfs_remove(phba->debug_dumpDif); /* dumpDif */
+		phba->debug_dumpDif = NULL;
+
+		debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
+		phba->debug_InjErrLBA = NULL;
+
+		debugfs_remove(phba->debug_InjErrNPortID);
+		phba->debug_InjErrNPortID = NULL;
+
+		debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+		phba->debug_InjErrWWPN = NULL;
+
+		debugfs_remove(phba->debug_writeGuard); /* writeGuard */
+		phba->debug_writeGuard = NULL;
+
+		debugfs_remove(phba->debug_writeApp); /* writeApp */
+		phba->debug_writeApp = NULL;
+
+		debugfs_remove(phba->debug_writeRef); /* writeRef */
+		phba->debug_writeRef = NULL;
+
+		debugfs_remove(phba->debug_readGuard); /* readGuard */
+		phba->debug_readGuard = NULL;
+
+		debugfs_remove(phba->debug_readApp); /* readApp */
+		phba->debug_readApp = NULL;
+
+		debugfs_remove(phba->debug_readRef); /* readRef */
+		phba->debug_readRef = NULL;
+
+		kfree(phba->slow_ring_trc);
+		phba->slow_ring_trc = NULL;
+
+		/* slow_ring_trace */
+		debugfs_remove(phba->debug_slow_ring_trc);
+		phba->debug_slow_ring_trc = NULL;
+
+		debugfs_remove(phba->debug_nvmeio_trc);
+		phba->debug_nvmeio_trc = NULL;
+
+		kfree(phba->nvmeio_trc);
+		phba->nvmeio_trc = NULL;
+
+		/*
+		 * iDiag release
+		 */
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			/* iDiag extAcc */
+			debugfs_remove(phba->idiag_ext_acc);
+			phba->idiag_ext_acc = NULL;
+
+			/* iDiag mbxAcc */
+			debugfs_remove(phba->idiag_mbx_acc);
+			phba->idiag_mbx_acc = NULL;
+
+			/* iDiag ctlAcc */
+			debugfs_remove(phba->idiag_ctl_acc);
+			phba->idiag_ctl_acc = NULL;
+
+			/* iDiag drbAcc */
+			debugfs_remove(phba->idiag_drb_acc);
+			phba->idiag_drb_acc = NULL;
+
+			/* iDiag queAcc */
+			debugfs_remove(phba->idiag_que_acc);
+			phba->idiag_que_acc = NULL;
+
+			/* iDiag queInfo */
+			debugfs_remove(phba->idiag_que_info);
+			phba->idiag_que_info = NULL;
+
+			/* iDiag barAcc */
+			debugfs_remove(phba->idiag_bar_acc);
+			phba->idiag_bar_acc = NULL;
+
+			/* iDiag pciCfg */
+			debugfs_remove(phba->idiag_pci_cfg);
+			phba->idiag_pci_cfg = NULL;
+
+			/* Finally remove the iDiag debugfs root */
+			debugfs_remove(phba->idiag_root);
+			phba->idiag_root = NULL;
+		}
+
+		if (phba->hba_debugfs_root) {
+			debugfs_remove(phba->hba_debugfs_root); /* fnX */
+			phba->hba_debugfs_root = NULL;
+			atomic_dec(&lpfc_debugfs_hba_count);
+		}
+
+		if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+			debugfs_remove(lpfc_debugfs_root); /* lpfc */
+			lpfc_debugfs_root = NULL;
+		}
+	}
+#endif
+	return;
+}
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here are intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_all_queues - dump all the queues associated with an HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps entries of all the queues associated with the @phba.
+ **/
+void
+lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
+{
+	int idx;
+
+	/*
+	 * Dump Work Queues (WQs)
+	 */
+	lpfc_debug_dump_wq(phba, DUMP_MBX, 0);
+	lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
+	lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
+
+	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+		lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
+
+	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+		lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
+
+	lpfc_debug_dump_hdr_rq(phba);
+	lpfc_debug_dump_dat_rq(phba);
+	/*
+	 * Dump Complete Queues (CQs)
+	 */
+	lpfc_debug_dump_cq(phba, DUMP_MBX, 0);
+	lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
+	lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
+
+	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+		lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
+
+	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+		lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
+
+	/*
+	 * Dump Event Queues (EQs)
+	 */
+	for (idx = 0; idx < phba->io_channel_irqs; idx++)
+		lpfc_debug_dump_hba_eq(phba, idx);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_debugfs.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 0000000..c4edd87
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,710 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#ifndef _H_LPFC_DEBUG_FS
+#define _H_LPFC_DEBUG_FS
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* dumpSLIqinfo output buffer size */
+#define	LPFC_DUMPSLIQINFO_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/* nvmestat output buffer size */
+#define LPFC_NVMESTAT_SIZE 8192
+#define LPFC_NVMEKTIME_SIZE 8192
+#define LPFC_CPUCHECK_SIZE 8192
+#define LPFC_NVMEIO_TRC_SIZE 8192
+
+#define LPFC_DEBUG_OUT_LINE_SZ	80
+
+/*
+ * For SLI4 iDiag debugfs diagnostics tool
+ */
+
+/* pciConf */
+#define LPFC_PCI_CFG_BROWSE 0xffff
+#define LPFC_PCI_CFG_RD_CMD_ARG 2
+#define LPFC_PCI_CFG_WR_CMD_ARG 3
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+#define IDIAG_PCICFG_WHERE_INDX 0
+#define IDIAG_PCICFG_COUNT_INDX 1
+#define IDIAG_PCICFG_VALUE_INDX 2
+
+/* barAcc */
+#define LPFC_PCI_BAR_BROWSE 0xffff
+#define LPFC_PCI_BAR_RD_CMD_ARG 3
+#define LPFC_PCI_BAR_WR_CMD_ARG 3
+
+#define LPFC_PCI_IF0_BAR0_SIZE (1024 *  16)
+#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
+#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
+#define LPFC_PCI_IF2_BAR0_SIZE (1024 *  32)
+
+#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
+#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
+
+#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
+#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
+#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
+#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
+
+#define IDIAG_BARACC_BAR_NUM_INDX 0
+#define IDIAG_BARACC_OFF_SET_INDX 1
+#define IDIAG_BARACC_ACC_MOD_INDX 2
+#define IDIAG_BARACC_REG_VAL_INDX 2
+#define IDIAG_BARACC_BAR_SZE_INDX 3
+
+#define IDIAG_BARACC_BAR_0 0
+#define IDIAG_BARACC_BAR_1 1
+#define IDIAG_BARACC_BAR_2 2
+
+#define SINGLE_WORD 1
+
+/* queue info */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
+
+/* queue acc */
+#define LPFC_QUE_ACC_BROWSE 0xffff
+#define LPFC_QUE_ACC_RD_CMD_ARG 4
+#define LPFC_QUE_ACC_WR_CMD_ARG 6
+#define LPFC_QUE_ACC_BUF_SIZE 4096
+#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2)
+
+#define LPFC_IDIAG_EQ 1
+#define LPFC_IDIAG_CQ 2
+#define LPFC_IDIAG_MQ 3
+#define LPFC_IDIAG_WQ 4
+#define LPFC_IDIAG_RQ 5
+
+#define IDIAG_QUEACC_QUETP_INDX 0
+#define IDIAG_QUEACC_QUEID_INDX 1
+#define IDIAG_QUEACC_INDEX_INDX 2
+#define IDIAG_QUEACC_COUNT_INDX 3
+#define IDIAG_QUEACC_OFFST_INDX 4
+#define IDIAG_QUEACC_VALUE_INDX 5
+
+/* doorbell register acc */
+#define LPFC_DRB_ACC_ALL 0xffff
+#define LPFC_DRB_ACC_RD_CMD_ARG 1
+#define LPFC_DRB_ACC_WR_CMD_ARG 2
+#define LPFC_DRB_ACC_BUF_SIZE 256
+
+#define LPFC_DRB_EQCQ 1
+#define LPFC_DRB_MQ   2
+#define LPFC_DRB_WQ   3
+#define LPFC_DRB_RQ   4
+
+#define LPFC_DRB_MAX  4
+
+#define IDIAG_DRBACC_REGID_INDX 0
+#define IDIAG_DRBACC_VALUE_INDX 1
+
+/* control register acc */
+#define LPFC_CTL_ACC_ALL 0xffff
+#define LPFC_CTL_ACC_RD_CMD_ARG 1
+#define LPFC_CTL_ACC_WR_CMD_ARG 2
+#define LPFC_CTL_ACC_BUF_SIZE 256
+
+#define LPFC_CTL_PORT_SEM  1
+#define LPFC_CTL_PORT_STA  2
+#define LPFC_CTL_PORT_CTL  3
+#define LPFC_CTL_PORT_ER1  4
+#define LPFC_CTL_PORT_ER2  5
+#define LPFC_CTL_PDEV_CTL  6
+
+#define LPFC_CTL_MAX  6
+
+#define IDIAG_CTLACC_REGID_INDX 0
+#define IDIAG_CTLACC_VALUE_INDX 1
+
+/* mailbox access */
+#define LPFC_MBX_DMP_ARG 4
+
+#define LPFC_MBX_ACC_BUF_SIZE 512
+#define LPFC_MBX_ACC_LBUF_SZ 128
+
+#define LPFC_MBX_DMP_MBX_WORD 0x00000001
+#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
+#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
+
+#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
+#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
+#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
+#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
+#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
+			      LPFC_BSG_DMP_MBX_RD_BUF | \
+			      LPFC_BSG_DMP_MBX_WR_MBX | \
+			      LPFC_BSG_DMP_MBX_WR_BUF)
+
+#define LPFC_MBX_DMP_ALL 0xffff
+#define LPFC_MBX_ALL_CMD 0xff
+
+#define IDIAG_MBXACC_MBCMD_INDX 0
+#define IDIAG_MBXACC_DPMAP_INDX 1
+#define IDIAG_MBXACC_DPCNT_INDX 2
+#define IDIAG_MBXACC_WDCNT_INDX 3
+
+/* extents access */
+#define LPFC_EXT_ACC_CMD_ARG 1
+#define LPFC_EXT_ACC_BUF_SIZE 4096
+
+#define LPFC_EXT_ACC_AVAIL 0x1
+#define LPFC_EXT_ACC_ALLOC 0x2
+#define LPFC_EXT_ACC_DRIVR 0x4
+#define LPFC_EXT_ACC_ALL   (LPFC_EXT_ACC_DRIVR | \
+			    LPFC_EXT_ACC_AVAIL | \
+			    LPFC_EXT_ACC_ALLOC)
+
+#define IDIAG_EXTACC_EXMAP_INDX 0
+
+#define SIZE_U8  sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+	{ \
+	if (phba->nvmeio_trc_on) \
+		lpfc_debugfs_nvme_trc(phba, fmt, ##arg); \
+	}
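+
+/*
+ * Illustrative use of the trace hook above (a sketch only; the argument
+ * values are hypothetical). Note that the fmt string must outlive the trace
+ * buffer: struct lpfc_debugfs_nvmeio_trc below stores the fmt pointer, not
+ * a copy.
+ *
+ *   lpfc_nvmeio_data(phba, "NVME cmpl: xri x%x stat x%x parm x%x\n",
+ *                    xri, status, parameter);
+ */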
+
+struct lpfc_debug {
+	char *i_private;
+	char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+	char *buffer;
+	int  len;
+};
+
+struct lpfc_debugfs_trc {
+	char *fmt;
+	uint32_t data1;
+	uint32_t data2;
+	uint32_t data3;
+	uint32_t seq_cnt;
+	unsigned long jif;
+};
+
+struct lpfc_debugfs_nvmeio_trc {
+	char *fmt;
+	uint16_t data1;
+	uint16_t data2;
+	uint32_t data3;
+};
+
+struct lpfc_idiag_offset {
+	uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 8
+struct lpfc_idiag_cmd {
+	uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+
+#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
+#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
+#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
+#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
+
+#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
+#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
+#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
+#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014
+
+#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021
+#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
+#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
+#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
+
+#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
+#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
+#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
+#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
+
+#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
+#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
+
+#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
+
+	uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
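+
+/*
+ * Example (illustrative): a write of "0x41 0x5a 0x3 1 32" to the mbxAcc
+ * debugfs file would be parsed into this struct as opcode = 0x41
+ * (LPFC_IDIAG_CMD_MBXACC_DP) and data[0..3] = { 0x5a, 0x3, 1, 32 }, i.e.
+ * mailbox command, dump map, dump count and word count, per the
+ * IDIAG_MBXACC_*_INDX indices defined above.
+ */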
+
+struct lpfc_idiag {
+	uint32_t active;
+	struct lpfc_idiag_cmd cmd;
+	struct lpfc_idiag_offset offset;
+	void *ptr_private;
+};
+
+#else
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+	no_printk(fmt, ##arg)
+
+#endif
+
+enum {
+	DUMP_FCP,
+	DUMP_NVME,
+	DUMP_MBX,
+	DUMP_ELS,
+	DUMP_NVMELS,
+};
+
+/* Mask for discovery_trace */
+#define LPFC_DISC_TRC_ELS_CMD		0x1	/* Trace ELS commands */
+#define LPFC_DISC_TRC_ELS_RSP		0x2	/* Trace ELS response */
+#define LPFC_DISC_TRC_ELS_UNSOL		0x4	/* Trace ELS rcv'ed   */
+#define LPFC_DISC_TRC_ELS_ALL		0x7	/* Trace ELS */
+#define LPFC_DISC_TRC_MBOX_VPORT	0x8	/* Trace vport MBOXs */
+#define LPFC_DISC_TRC_MBOX		0x10	/* Trace other MBOXs */
+#define LPFC_DISC_TRC_MBOX_ALL		0x18	/* Trace all MBOXs */
+#define LPFC_DISC_TRC_CT		0x20	/* Trace disc CT requests */
+#define LPFC_DISC_TRC_DSM		0x40    /* Trace DSM events */
+#define LPFC_DISC_TRC_RPORT		0x80    /* Trace rport events */
+#define LPFC_DISC_TRC_NODE		0x100   /* Trace ndlp state changes */
+
+#define LPFC_DISC_TRC_DISCOVERY		0xef    /* common mask for general
+						 * discovery */
+#endif /* _H_LPFC_DEBUG_FS */
+
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here are intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_qe - dump a specific entry from a queue
+ * @q: Pointer to the queue descriptor.
+ * @idx: Index to the entry on the queue.
+ *
+ * This function dumps an entry indexed by @idx from a queue specified by the
+ * queue descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
+{
+	char line_buf[LPFC_LBUF_SZ];
+	int i, esize, qe_word_cnt, len;
+	uint32_t *pword;
+
+	/* sanity checks */
+	if (!q)
+		return;
+	if (idx >= q->entry_count)
+		return;
+
+	esize = q->entry_size;
+	qe_word_cnt = esize / sizeof(uint32_t);
+	pword = q->qe[idx].address;
+
+	len = 0;
+	len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+	if (qe_word_cnt > 8)
+		printk(KERN_ERR "%s\n", line_buf);
+
+	for (i = 0; i < qe_word_cnt; i++) {
+		if (!(i % 8)) {
+			if (i != 0)
+				printk(KERN_ERR "%s\n", line_buf);
+			if (qe_word_cnt > 8) {
+				len = 0;
+				memset(line_buf, 0, LPFC_LBUF_SZ);
+				len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+		}
+		len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+				((uint32_t)*pword) & 0xffffffff);
+		pword++;
+	}
+	if (qe_word_cnt <= 8 || (i - 1) % 8)
+		printk(KERN_ERR "%s\n", line_buf);
+}
+
+/**
+ * lpfc_debug_dump_q - dump all entries from a specific queue
+ * @q: Pointer to the queue descriptor.
+ *
+ * This function dumps all entries from a queue specified by the queue
+ * descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_q(struct lpfc_queue *q)
+{
+	int idx, entry_count;
+
+	/* sanity check */
+	if (!q)
+		return;
+
+	dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev,
+		"%d: [qid:%d, type:%d, subtype:%d, "
+		"qe_size:%d, qe_count:%d, "
+		"host_index:%d, port_index:%d]\n",
+		(q->phba)->brd_no,
+		q->queue_id, q->type, q->subtype,
+		q->entry_size, q->entry_count,
+		q->host_index, q->hba_index);
+	entry_count = q->entry_count;
+	for (idx = 0; idx < entry_count; idx++)
+		lpfc_debug_dump_qe(q, idx);
+	printk(KERN_ERR "\n");
+}
+
+/**
+ * lpfc_debug_dump_wq - dump all entries from a work queue
+ * @phba: Pointer to HBA context object.
+ * @qtype: Queue type: DUMP_FCP, DUMP_NVME, DUMP_MBX, DUMP_ELS or DUMP_NVMELS.
+ * @wqidx: Index to a FCP or NVME work queue; ignored for the other types.
+ *
+ * This function dumps all entries from the work queue selected by @qtype
+ * and, for the FCP and NVME queue types, by the @wqidx.
+ **/
+static inline void
+lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
+{
+	struct lpfc_queue *wq;
+	char *qtypestr;
+
+	if (qtype == DUMP_FCP) {
+		wq = phba->sli4_hba.fcp_wq[wqidx];
+		qtypestr = "FCP";
+	} else if (qtype == DUMP_NVME) {
+		wq = phba->sli4_hba.nvme_wq[wqidx];
+		qtypestr = "NVME";
+	} else if (qtype == DUMP_MBX) {
+		wq = phba->sli4_hba.mbx_wq;
+		qtypestr = "MBX";
+	} else if (qtype == DUMP_ELS) {
+		wq = phba->sli4_hba.els_wq;
+		qtypestr = "ELS";
+	} else if (qtype == DUMP_NVMELS) {
+		wq = phba->sli4_hba.nvmels_wq;
+		qtypestr = "NVMELS";
+	} else
+		return;
+
+	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+		pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
+			qtypestr, wqidx, wq->queue_id);
+	else
+		pr_err("%s WQ: WQ[Qid:%d]\n",
+			qtypestr, wq->queue_id);
+
+	lpfc_debug_dump_q(wq);
+}
+
+/**
+ * lpfc_debug_dump_cq - dump all entries from a work queue's completion queue
+ * @phba: Pointer to HBA context object.
+ * @qtype: Queue type: DUMP_FCP, DUMP_NVME, DUMP_MBX, DUMP_ELS or DUMP_NVMELS.
+ * @wqidx: Index to a FCP or NVME work queue; ignored for the other types.
+ *
+ * This function dumps all entries from the completion queue associated
+ * with the work queue selected by @qtype and @wqidx.
+ **/
+static inline void
+lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
+{
+	struct lpfc_queue *wq, *cq, *eq;
+	char *qtypestr;
+	int eqidx;
+
+	/* fcp/nvme wq and cq are 1:1, thus same indexes */
+
+	if (qtype == DUMP_FCP) {
+		wq = phba->sli4_hba.fcp_wq[wqidx];
+		cq = phba->sli4_hba.fcp_cq[wqidx];
+		qtypestr = "FCP";
+	} else if (qtype == DUMP_NVME) {
+		wq = phba->sli4_hba.nvme_wq[wqidx];
+		cq = phba->sli4_hba.nvme_cq[wqidx];
+		qtypestr = "NVME";
+	} else if (qtype == DUMP_MBX) {
+		wq = phba->sli4_hba.mbx_wq;
+		cq = phba->sli4_hba.mbx_cq;
+		qtypestr = "MBX";
+	} else if (qtype == DUMP_ELS) {
+		wq = phba->sli4_hba.els_wq;
+		cq = phba->sli4_hba.els_cq;
+		qtypestr = "ELS";
+	} else if (qtype == DUMP_NVMELS) {
+		wq = phba->sli4_hba.nvmels_wq;
+		cq = phba->sli4_hba.nvmels_cq;
+		qtypestr = "NVMELS";
+	} else
+		return;
+
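+	/* Find the EQ this CQ is bound to by matching the CQ's assoc_qid */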
+	for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
+		if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
+			break;
+	}
+	if (eqidx == phba->io_channel_irqs) {
+		pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
+		eqidx = 0;
+	}
+
+	eq = phba->sli4_hba.hba_eq[eqidx];
+
+	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+		pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
+			"->EQ[Idx:%d|Qid:%d]:\n",
+			qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
+			eqidx, eq->queue_id);
+	else
+		pr_err("%s CQ: WQ[Qid:%d]->CQ[Qid:%d]"
+			"->EQ[Idx:%d|Qid:%d]:\n",
+			qtypestr, wq->queue_id, cq->queue_id,
+			eqidx, eq->queue_id);
+
+	lpfc_debug_dump_q(cq);
+}
+
+/**
+ * lpfc_debug_dump_hba_eq - dump all entries from an hba event queue
+ * @phba: Pointer to HBA context object.
+ * @qidx: Index to the HBA event queue.
+ *
+ * This function dumps all entries from the HBA event queue specified by
+ * the @qidx.
+ **/
+static inline void
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
+{
+	struct lpfc_queue *qp;
+
+	qp = phba->sli4_hba.hba_eq[qidx];
+
+	pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
+
+	lpfc_debug_dump_q(qp);
+}
+
+/**
+ * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive data queue.
+ **/
+static inline void
+lpfc_debug_dump_dat_rq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n",
+		phba->sli4_hba.dat_rq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+}
+
+/**
+ * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive header queue.
+ **/
+static inline void
+lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n",
+		phba->sli4_hba.hdr_rq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+}
+
+/**
+ * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Work queue identifier.
+ *
+ * This function dumps all entries from a work queue identified by the queue
+ * identifier.
+ **/
+static inline void
+lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
+{
+	int wq_idx;
+
+	for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
+		if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+			break;
+	if (wq_idx < phba->cfg_fcp_io_channel) {
+		pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+		return;
+	}
+
+	for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
+		if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+			break;
+	if (wq_idx < phba->cfg_nvme_io_channel) {
+		pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+		return;
+	}
+
+	if (phba->sli4_hba.els_wq->queue_id == qid) {
+		pr_err("ELS WQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+		return;
+	}
+
+	if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
+		pr_err("NVME LS WQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Mbox work queue identifier.
+ *
+ * This function dumps all entries from a mbox work queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)
+{
+	if (phba->sli4_hba.mbx_wq->queue_id == qid) {
+		printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Receive queue identifier.
+ *
+ * This function dumps all entries from a receive queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
+{
+	if (phba->sli4_hba.hdr_rq->queue_id == qid) {
+		printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+		return;
+	}
+	if (phba->sli4_hba.dat_rq->queue_id == qid) {
+		printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Complete queue identifier.
+ *
+ * This function dumps all entries from a complete queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
+{
+	int cq_idx;
+
+	for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
+		if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+			break;
+
+	if (cq_idx < phba->cfg_fcp_io_channel) {
+		pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+		return;
+	}
+
+	for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
+		if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+			break;
+
+	if (cq_idx < phba->cfg_nvme_io_channel) {
+		pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+		return;
+	}
+
+	if (phba->sli4_hba.els_cq->queue_id == qid) {
+		pr_err("ELS CQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+		return;
+	}
+
+	if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
+		pr_err("NVME LS CQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
+		return;
+	}
+
+	if (phba->sli4_hba.mbx_cq->queue_id == qid) {
+		pr_err("MBX CQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Complete queue identifier.
+ *
+ * This function dumps all entries from an event queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
+{
+	int eq_idx;
+
+	for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
+		if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
+			break;
+
+	if (eq_idx < phba->io_channel_irqs) {
+		printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
+		return;
+	}
+}
+
+void lpfc_debug_dump_all_queues(struct lpfc_hba *);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_disc.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_disc.h
new file mode 100644
index 0000000..f9a566e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_disc.h
@@ -0,0 +1,291 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define FC_MAX_HOLD_RSCN     32	      /* max number of deferred RSCNs */
+#define FC_MAX_NS_RSP        64512    /* max size NameServer rsp */
+#define FC_MAXLOOP           126      /* max devices supported on a fc loop */
+#define LPFC_DISC_FLOGI_TMO  10	      /* Discovery FLOGI ratov */
+
+
+/* This is the protocol dependent definition for a Node List Entry.
+ * This is used by Fibre Channel protocol to support FCP.
+ */
+
+/* worker thread events */
+enum lpfc_work_type {
+	LPFC_EVT_ONLINE,
+	LPFC_EVT_OFFLINE_PREP,
+	LPFC_EVT_OFFLINE,
+	LPFC_EVT_WARM_START,
+	LPFC_EVT_KILL,
+	LPFC_EVT_ELS_RETRY,
+	LPFC_EVT_DEV_LOSS,
+	LPFC_EVT_FASTPATH_MGMT_EVT,
+	LPFC_EVT_RESET_HBA,
+};
+
+/* structure used to queue event to the discovery tasklet */
+struct lpfc_work_evt {
+	struct list_head      evt_listp;
+	void                 *evt_arg1;
+	void                 *evt_arg2;
+	enum lpfc_work_type   evt;
+};
+
+struct lpfc_scsi_check_condition_event;
+struct lpfc_scsi_varqueuedepth_event;
+struct lpfc_scsi_event_header;
+struct lpfc_fabric_event_header;
+struct lpfc_fcprdchkerr_event;
+
+/* structure used for sending events from fast path */
+struct lpfc_fast_path_event {
+	struct lpfc_work_evt work_evt;
+	struct lpfc_vport     *vport;
+	union {
+		struct lpfc_scsi_check_condition_event check_cond_evt;
+		struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
+		struct lpfc_scsi_event_header scsi_evt;
+		struct lpfc_fabric_event_header fabric_evt;
+		struct lpfc_fcprdchkerr_event read_check_error;
+	} un;
+};
+
+#define LPFC_SLI4_MAX_XRI	1024	/* Used to make the ndlp's xri_bitmap */
+#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG)
+struct lpfc_node_rrqs {
+	unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
+};
+
+struct lpfc_nodelist {
+	struct list_head nlp_listp;
+	struct lpfc_name nlp_portname;
+	struct lpfc_name nlp_nodename;
+	uint32_t         nlp_flag;		/* entry flags */
+	uint32_t         nlp_DID;		/* FC D_ID of entry */
+	uint32_t         nlp_last_elscmd;	/* Last ELS cmd sent */
+	uint16_t         nlp_type;
+#define NLP_FC_NODE        0x1			/* entry is an FC node */
+#define NLP_FABRIC         0x4			/* entry rep a Fabric entity */
+#define NLP_FCP_TARGET     0x8			/* entry is an FCP target */
+#define NLP_FCP_INITIATOR  0x10			/* entry is an FCP Initiator */
+#define NLP_NVME_TARGET    0x20			/* entry is a NVME Target */
+#define NLP_NVME_INITIATOR 0x40			/* entry is a NVME Initiator */
+#define NLP_NVME_DISCOVERY 0x80                 /* entry has NVME disc srvc */
+
+	uint16_t	nlp_fc4_type;		/* FC types node supports. */
+						/* Assigned from GID_FF, only
+						 * FCP (0x8) and NVME (0x28)
+						 * supported.
+						 */
+#define NLP_FC4_NONE	0x0
+#define NLP_FC4_FCP	0x1			/* FC4 Type FCP (value x8) */
+#define NLP_FC4_NVME	0x2			/* FC4 TYPE NVME (value x28) */
+
+	uint16_t        nlp_rpi;
+	uint16_t        nlp_state;		/* state transition indicator */
+	uint16_t        nlp_prev_state;		/* state transition indicator */
+	uint16_t        nlp_xri;		/* output exchange id for RPI */
+	uint16_t        nlp_sid;		/* scsi id */
+#define NLP_NO_SID		0xffff
+	uint16_t	nlp_maxframe;		/* Max RCV frame size */
+	uint8_t		nlp_class_sup;		/* Supported Classes */
+	uint8_t         nlp_retry;		/* used for ELS retries */
+	uint8_t         nlp_fcp_info;	        /* class info, bits 0-3 */
+#define NLP_FCP_2_DEVICE   0x10			/* FCP-2 device */
+
+	uint16_t        nlp_usg_map;	/* ndlp management usage bitmap */
+#define NLP_USG_NODE_ACT_BIT	0x1	/* Indicate ndlp is actively used */
+#define NLP_USG_IACT_REQ_BIT	0x2	/* Request to inactivate ndlp */
+#define NLP_USG_FREE_REQ_BIT	0x4	/* Request to invoke ndlp memory free */
+#define NLP_USG_FREE_ACK_BIT	0x8	/* Indicate ndlp memory free invoked */
+
+	struct timer_list   nlp_delayfunc;	/* Used for delayed ELS cmds */
+	struct lpfc_hba *phba;
+	struct fc_rport *rport;		/* scsi_transport_fc port structure */
+	struct lpfc_nvme_rport *nrport;	/* nvme transport rport struct. */
+	struct lpfc_vport *vport;
+	struct lpfc_work_evt els_retry_evt;
+	struct lpfc_work_evt dev_loss_evt;
+	struct kref     kref;
+	atomic_t cmd_pending;
+	uint32_t cmd_qdepth;
+	unsigned long last_change_time;
+	unsigned long *active_rrqs_xri_bitmap;
+	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */
+	uint32_t fc4_prli_sent;
+	uint32_t upcall_flags;
+	uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
+#define NVME_FB_BIT_SHIFT 9    /* PRLI Rsp first burst in 512B units. */
+};
+struct lpfc_node_rrq {
+	struct list_head list;
+	uint16_t xritag;
+	uint16_t send_rrq;
+	uint16_t rxid;
+	uint32_t         nlp_DID;		/* FC D_ID of entry */
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	unsigned long rrq_stop_time;
+};
+
+/* Defines for nlp_flag (uint32) */
+#define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
+#define NLP_REG_LOGIN_SEND 0x00000002   /* sent reglogin to adapter */
+#define NLP_SUPPRESS_RSP   0x00000010	/* Remote NPort supports suppress rsp */
+#define NLP_PLOGI_SND      0x00000020	/* sent PLOGI request for this entry */
+#define NLP_PRLI_SND       0x00000040	/* sent PRLI request for this entry */
+#define NLP_ADISC_SND      0x00000080	/* sent ADISC request for this entry */
+#define NLP_LOGO_SND       0x00000100	/* sent LOGO request for this entry */
+#define NLP_RNID_SND       0x00000400	/* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK   0x000007e0	/* sent ELS request for this entry */
+#define NLP_NVMET_RECOV    0x00001000   /* NVMET auditing node for recovery. */
+#define NLP_FCP_PRLI_RJT   0x00002000   /* Rport does not support FCP PRLI. */
+#define NLP_DEFER_RM       0x00010000	/* Remove this ndlp if no longer used */
+#define NLP_DELAY_TMO      0x00020000	/* delay timeout is running for node */
+#define NLP_NPR_2B_DISC    0x00040000	/* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI      0x00080000	/* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC       0x00100000	/* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID  0x00200000	/* good PRLI but no binding for scsid */
+#define NLP_ISSUE_LOGO     0x00400000	/* waiting to issue a LOGO */
+#define NLP_IN_DEV_LOSS    0x00800000	/* devloss in progress */
+#define NLP_ACC_REGLOGIN   0x01000000	/* Issue Reg Login after successful
+					   ACC */
+#define NLP_NPR_ADISC      0x02000000	/* Issue ADISC when dq'ed from
+					   NPR list */
+#define NLP_RM_DFLT_RPI    0x04000000	/* need to remove leftover dflt RPI */
+#define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
+#define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
+#define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+#define NLP_FIRSTBURST     0x40000000	/* Target supports FirstBurst */
+#define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
+
+
+/* ndlp usage management macros */
+#define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \
+						& NLP_USG_NODE_ACT_BIT) \
+					&& \
+					!((ndlp)->nlp_usg_map \
+						& NLP_USG_FREE_ACK_BIT))
+#define NLP_SET_NODE_ACT(ndlp)		((ndlp)->nlp_usg_map \
+						|= NLP_USG_NODE_ACT_BIT)
+#define NLP_INT_NODE_ACT(ndlp)		((ndlp)->nlp_usg_map \
+						= NLP_USG_NODE_ACT_BIT)
+#define NLP_CLR_NODE_ACT(ndlp)		((ndlp)->nlp_usg_map \
+						&= ~NLP_USG_NODE_ACT_BIT)
+#define NLP_CHK_IACT_REQ(ndlp)          ((ndlp)->nlp_usg_map \
+						& NLP_USG_IACT_REQ_BIT)
+#define NLP_SET_IACT_REQ(ndlp)          ((ndlp)->nlp_usg_map \
+						|= NLP_USG_IACT_REQ_BIT)
+#define NLP_CHK_FREE_REQ(ndlp)		((ndlp)->nlp_usg_map \
+						& NLP_USG_FREE_REQ_BIT)
+#define NLP_SET_FREE_REQ(ndlp)		((ndlp)->nlp_usg_map \
+						|= NLP_USG_FREE_REQ_BIT)
+#define NLP_CHK_FREE_ACK(ndlp)		((ndlp)->nlp_usg_map \
+						& NLP_USG_FREE_ACK_BIT)
+#define NLP_SET_FREE_ACK(ndlp)		((ndlp)->nlp_usg_map \
+						|= NLP_USG_FREE_ACK_BIT)
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
+ * when Link Up discovery or Registered State Change Notification (RSCN)
+ * processing is needed.  Each list holds the nodes that require a PLOGI or
+ * ADISC Extended Link Service (ELS) request.  These lists keep track of the
+ * nodes affected by an RSCN, or a Link Up (typically, all nodes are affected
+ * by Link Up) event.  The unmapped_list contains all nodes that have
+ * successfully logged in at the Fibre Channel level.  The
+ * mapped_list will contain all nodes that are mapped FCP targets.
+ *
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+
+/* Defines for nlp_state */
+#define NLP_STE_UNUSED_NODE       0x0	/* node is just allocated */
+#define NLP_STE_PLOGI_ISSUE       0x1	/* PLOGI was sent to NL_PORT */
+#define NLP_STE_ADISC_ISSUE       0x2	/* ADISC was sent to NL_PORT */
+#define NLP_STE_REG_LOGIN_ISSUE   0x3	/* REG_LOGIN was issued for NL_PORT */
+#define NLP_STE_PRLI_ISSUE        0x4	/* PRLI was sent to NL_PORT */
+#define NLP_STE_LOGO_ISSUE	  0x5	/* LOGO was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE     0x6	/* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE       0x7	/* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE          0x8	/* NPort disappeared */
+#define NLP_STE_MAX_STATE         0x9
+#define NLP_STE_FREED_NODE        0xff	/* node entry was freed to MEM_NLP */
+
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to PRLI_COMPL. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
+ * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list.  32 entries are processed initially and
+ * ADISC is initiated for each one.  Completions / Events for each node are
+ * funneled through the state machine.  As each node finishes ADISC processing, it
+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list.  32 entries are processed initially and PLOGI is initiated for each one.
+ * Completions / Events for each node are funneled through the state machine.  As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+/* Defines for Node List Entry Events that could happen */
+#define NLP_EVT_RCV_PLOGI         0x0	/* Rcv'd an ELS PLOGI command */
+#define NLP_EVT_RCV_PRLI          0x1	/* Rcv'd an ELS PRLI  command */
+#define NLP_EVT_RCV_LOGO          0x2	/* Rcv'd an ELS LOGO  command */
+#define NLP_EVT_RCV_ADISC         0x3	/* Rcv'd an ELS ADISC command */
+#define NLP_EVT_RCV_PDISC         0x4	/* Rcv'd an ELS PDISC command */
+#define NLP_EVT_RCV_PRLO          0x5	/* Rcv'd an ELS PRLO  command */
+#define NLP_EVT_CMPL_PLOGI        0x6	/* Sent an ELS PLOGI command */
+#define NLP_EVT_CMPL_PRLI         0x7	/* Sent an ELS PRLI  command */
+#define NLP_EVT_CMPL_LOGO         0x8	/* Sent an ELS LOGO  command */
+#define NLP_EVT_CMPL_ADISC        0x9	/* Sent an ELS ADISC command */
+#define NLP_EVT_CMPL_REG_LOGIN    0xa	/* REG_LOGIN mbox cmd completed */
+#define NLP_EVT_DEVICE_RM         0xb	/* Device not found in NS / ALPAmap */
+#define NLP_EVT_DEVICE_RECOVERY   0xc	/* Device existence unknown */
+#define NLP_EVT_MAX_EVENT         0xd
+
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_els.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_els.c
new file mode 100644
index 0000000..566e8d0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_els.c
@@ -0,0 +1,9527 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+/* See Fibre Channel protocol T11 FC-LS for details */
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
+			  struct lpfc_iocbq *);
+static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
+			struct lpfc_iocbq *);
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
+static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
+				struct lpfc_nodelist *ndlp, uint8_t retry);
+static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
+				  struct lpfc_iocbq *iocb);
+
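+/* Maximum number of times an ELS command is retried before it is failed */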
+static int lpfc_max_els_tries = 3;
+
+/**
+ * lpfc_els_chk_latt - Check host link attention event for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there is an outstanding host link
+ * attention event during the discovery process with the @vport. It is done
+ * by reading the HBA's Host Attention (HA) register. If there is any host
+ * link attention events during this @vport's discovery process, the @vport
+ * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
+ * be issued if the link state is not already in host link cleared state,
+ * and a return code shall indicate whether the host link attention event
+ * had happened.
+ *
+ * Note that, if either the host link is in state LPFC_LINK_DOWN or the
+ * @vport is in state LPFC_VPORT_READY, the request for checking host link
+ * attention event will be ignored and a return code shall indicate no host
+ * link attention event had happened.
+ *
+ * Return codes
+ *   0 - no host link attention event happened
+ *   1 - host link attention event happened
+ **/
+int
+lpfc_els_chk_latt(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	uint32_t ha_copy;
+
+	if (vport->port_state >= LPFC_VPORT_READY ||
+	    phba->link_state == LPFC_LINK_DOWN ||
+	    phba->sli_rev > LPFC_SLI_REV3)
+		return 0;
+
+	/* Read the HBA Host Attention Register */
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return 1;
+
+	if (!(ha_copy & HA_LATT))
+		return 0;
+
+	/* Pending Link Event during Discovery */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0237 Pending Link Event during "
+			 "Discovery: State x%x\n",
+			 phba->pport->port_state);
+
+	/* CLEAR_LA should re-enable link attention events and
+	 * we should then immediately take a LATT event. The
+	 * LATT processing should call lpfc_linkdown() which
+	 * will cleanup any left over in-progress discovery
+	 * events.
+	 */
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_ABORT_DISCOVERY;
+	spin_unlock_irq(shost->host_lock);
+
+	if (phba->link_state != LPFC_CLEAR_LA)
+		lpfc_issue_clear_la(phba, vport);
+
+	return 1;
+}
+
+/**
+ * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @expectRsp: flag indicating whether response is expected.
+ * @cmdSize: size of the ELS command.
+ * @retry: number of retries to the command IOCB when it fails.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: destination identifier.
+ * @elscmd: the ELS command code.
+ *
+ * This routine allocates a lpfc-IOCB data structure from the driver
+ * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
+ * for the discovery state machine to issue Extended Link Service (ELS)
+ * commands. It is a generic lpfc-IOCB allocation and preparation routine
+ * used by all the discovery state machine routines; the ELS
+ * command-specific fields are set up later by the individual discovery
+ * state machine routines, after this routine has allocated and prepared
+ * a generic IOCB data structure. It fills in the
+ * Buffer Descriptor Entries (BDEs), allocates buffers for both command
+ * payload and response payload (if expected). The reference count on the
+ * ndlp is incremented by 1 and the reference to the ndlp is put into
+ * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * reference for the command's callback function to access later.
+ *
+ * Return code
+ *   Pointer to the newly allocated/prepared els iocb data structure
+ *   NULL - when els iocb data structure allocation/preparation failed
+ **/
+struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+		   uint16_t cmdSize, uint8_t retry,
+		   struct lpfc_nodelist *ndlp, uint32_t did,
+		   uint32_t elscmd)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
+	struct ulp_bde64 *bpl;
+	IOCB_t *icmd;
+
+
+	if (!lpfc_is_link_up(phba))
+		return NULL;
+
+	/* Allocate buffer for  command iocb */
+	elsiocb = lpfc_sli_get_iocbq(phba);
+
+	if (elsiocb == NULL)
+		return NULL;
+
+	/*
+	 * If this command is for fabric controller and HBA running
+	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
+	 */
+	if ((did == Fabric_DID) &&
+		(phba->hba_flag & HBA_FIP_SUPPORT) &&
+		((elscmd == ELS_CMD_FLOGI) ||
+		 (elscmd == ELS_CMD_FDISC) ||
+		 (elscmd == ELS_CMD_LOGO)))
+		switch (elscmd) {
+		case ELS_CMD_FLOGI:
+			elsiocb->iocb_flag |=
+				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+						& LPFC_FIP_ELS_ID_MASK);
+			break;
+		case ELS_CMD_FDISC:
+			elsiocb->iocb_flag |=
+				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+						& LPFC_FIP_ELS_ID_MASK);
+			break;
+		case ELS_CMD_LOGO:
+			elsiocb->iocb_flag |=
+				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+						& LPFC_FIP_ELS_ID_MASK);
+			break;
+		}
+	else
+		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
+
+	icmd = &elsiocb->iocb;
+
+	/* fill in BDEs for command */
+	/* Allocate buffer for command payload */
+	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (pcmd)
+		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+	if (!pcmd || !pcmd->virt)
+		goto els_iocb_free_pcmb_exit;
+
+	INIT_LIST_HEAD(&pcmd->list);
+
+	/* Allocate buffer for response payload */
+	if (expectRsp) {
+		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (prsp)
+			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+						     &prsp->phys);
+		if (!prsp || !prsp->virt)
+			goto els_iocb_free_prsp_exit;
+		INIT_LIST_HEAD(&prsp->list);
+	} else
+		prsp = NULL;
+
+	/* Allocate buffer for Buffer ptr list */
+	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (pbuflist)
+		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+						 &pbuflist->phys);
+	if (!pbuflist || !pbuflist->virt)
+		goto els_iocb_free_pbuf_exit;
+
+	INIT_LIST_HEAD(&pbuflist->list);
+
+	if (expectRsp) {
+		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+
+		icmd->un.elsreq64.remoteID = did;		/* DID */
+		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
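+		/* FLOGI is timed against twice the default R_A_TOV (the
+		 * fabric's R_A_TOV is not yet known); LOGO against one
+		 * discovered R_A_TOV; all other requests against twice
+		 * the discovered R_A_TOV.
+		 */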
+		if (elscmd == ELS_CMD_FLOGI)
+			icmd->ulpTimeout = FF_DEF_RATOV * 2;
+		else if (elscmd == ELS_CMD_LOGO)
+			icmd->ulpTimeout = phba->fc_ratov;
+		else
+			icmd->ulpTimeout = phba->fc_ratov * 2;
+	} else {
+		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
+		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+	}
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+
+	/*
+	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
+	 * For SLI4, since the driver controls VPIs we also want to include
+	 * all ELS pt2pt protocol traffic as well.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+		((phba->sli_rev == LPFC_SLI_REV4) &&
+		    (vport->fc_flag & FC_PT2PT))) {
+
+		if (expectRsp) {
+			icmd->un.elsreq64.myID = vport->fc_myDID;
+
+			/* For ELS_REQUEST64_CR, use the VPI by default */
+			icmd->ulpContext = phba->vpi_ids[vport->vpi];
+		}
+
+		icmd->ulpCt_h = 0;
+		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+		if (elscmd == ELS_CMD_ECHO)
+			icmd->ulpCt_l = 0; /* context = invalid RPI */
+		else
+			icmd->ulpCt_l = 1; /* context = VPI */
+	}
+
+	bpl = (struct ulp_bde64 *) pbuflist->virt;
+	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
+	bpl->tus.f.bdeSize = cmdSize;
+	bpl->tus.f.bdeFlags = 0;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	if (expectRsp) {
+		bpl++;
+		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
+		bpl->tus.f.bdeSize = FCELSSIZE;
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+	}
+
+	/* prevent preparing iocb with NULL ndlp reference */
+	elsiocb->context1 = lpfc_nlp_get(ndlp);
+	if (!elsiocb->context1)
+		goto els_iocb_free_pbuf_exit;
+	elsiocb->context2 = pcmd;
+	elsiocb->context3 = pbuflist;
+	elsiocb->retry = retry;
+	elsiocb->vport = vport;
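+	/* Driver-side timeout: twice R_A_TOV plus driver scheduling slack */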
+	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
+
+	if (prsp) {
+		list_add(&prsp->list, &pcmd->list);
+	}
+	if (expectRsp) {
+		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "0116 Xmit ELS command x%x to remote "
+				 "NPORT x%x I/O tag: x%x, port state:x%x"
+				 " fc_flag:x%x\n",
+				 elscmd, did, elsiocb->iotag,
+				 vport->port_state,
+				 vport->fc_flag);
+	} else {
+		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "0117 Xmit ELS response x%x to remote "
+				 "NPORT x%x I/O tag: x%x, size: x%x "
+				 "port_state x%x fc_flag x%x\n",
+				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
+				 cmdSize, vport->port_state,
+				 vport->fc_flag);
+	}
+	return elsiocb;
+
+els_iocb_free_pbuf_exit:
+	if (expectRsp)
+		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+	kfree(pbuflist);
+
+els_iocb_free_prsp_exit:
+	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+	kfree(prsp);
+
+els_iocb_free_pcmb_exit:
+	kfree(pcmd);
+	lpfc_sli_release_iocbq(phba, elsiocb);
+	return NULL;
+}
+
+/**
+ * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a fabric registration login for a @vport. An
+ * active ndlp node with Fabric_DID must already exist for this @vport.
+ * The routine invokes two mailbox commands to carry out fabric registration
+ * login through the HBA firmware: the first mailbox command requests the
+ * HBA to perform link configuration for the @vport; and the second mailbox
+ * command requests the HBA to perform the actual fabric registration login
+ * with the @vport.
+ *
+ * Return code
+ *   0 - successfully issued fabric registration login for @vport
+ *   -ENXIO -- failed to issue fabric registration login for @vport
+ **/
+int
+lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
+	struct serv_parm *sp;
+	int rc;
+	int err = 0;
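+	/* err records which step failed, for the 0249 message logged below */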
+
+	sp = &phba->fc_fabparam;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		err = 1;
+		goto fail;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		err = 2;
+		goto fail;
+	}
+
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	lpfc_config_link(phba, mbox);
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->vport = vport;
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		err = 3;
+		goto fail_free_mbox;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		err = 4;
+		goto fail;
+	}
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+			  ndlp->nlp_rpi);
+	if (rc) {
+		err = 5;
+		goto fail_free_mbox;
+	}
+
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+	mbox->vport = vport;
+	/* increment the reference count on ndlp to hold reference
+	 * for the callback routine.
+	 */
+	mbox->context2 = lpfc_nlp_get(ndlp);
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		err = 6;
+		goto fail_issue_reg_login;
+	}
+
+	return 0;
+
+fail_issue_reg_login:
+	/* decrement the reference count on ndlp just incremented
+	 * for the failed mbox command.
+	 */
+	lpfc_nlp_put(ndlp);
+	mp = (struct lpfc_dmabuf *) mbox->context1;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+fail_free_mbox:
+	mempool_free(mbox, phba->mbox_mem_pool);
+
+fail:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0249 Cannot issue Register Fabric login: Err %d\n", err);
+	return -ENXIO;
+}
+
+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mboxq = NULL;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *dmabuf = NULL;
+	int rc = 0;
+
+	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
+	    !(vport->fc_flag & FC_PT2PT)) {
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+			rc = -ENODEV;
+			goto fail;
+		}
+	}
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	/* Supply CSPs only if we are fabric-connected or pt2pt-connected */
+	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
+		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!dmabuf) {
+			rc = -ENOMEM;
+			goto fail;
+		}
+		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+		if (!dmabuf->virt) {
+			rc = -ENOMEM;
+			goto fail;
+		}
+		memcpy(dmabuf->virt, &phba->fc_fabparam,
+		       sizeof(struct serv_parm));
+	}
+
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	if (dmabuf)
+		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+	else
+		lpfc_reg_vfi(mboxq, vport, 0);
+
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+	mboxq->vport = vport;
+	mboxq->context1 = dmabuf;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = -ENXIO;
+		goto fail;
+	}
+	return 0;
+
+fail:
+	if (mboxq)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (dmabuf) {
+		if (dmabuf->virt)
+			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0289 Issue Register VFI failed: Err %d\n", rc);
+	return rc;
+}
+
+/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ *   0 - successfully issued UNREG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct Scsi_Host *shost;
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2556 UNREG_VFI mbox allocation failed"
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+
+	lpfc_unreg_vfi(mboxq, vport);
+	mboxq->vport = vport;
+	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2557 UNREG_VFI issue mbox failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VFI_REGISTERED;
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+
+/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from the FLOGI/FDISC completion handler functions.
+ * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
+ * Fabric nodename has changed in the completion service parameters, and 0
+ * otherwise. This function also sets a flag in the vport data structure to
+ * delay N_Port discovery after the FLOGI/FDISC completion if the Clean
+ * Address bit in the FLOGI/FDISC response is cleared and the FCID, Fabric
+ * portname or Fabric nodename has changed.
+ *
+ * Return code
+ *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
+ *   1 - FCID or Fabric Nodename or Fabric portname is changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+		struct serv_parm *sp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint8_t fabric_param_changed = 0;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if ((vport->fc_prevDID != vport->fc_myDID) ||
+		memcmp(&vport->fabric_portname, &sp->portName,
+			sizeof(struct lpfc_name)) ||
+		memcmp(&vport->fabric_nodename, &sp->nodeName,
+			sizeof(struct lpfc_name)) ||
+		(vport->vport_flag & FAWWPN_PARAM_CHG)) {
+		fabric_param_changed = 1;
+		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
+	}
+	/*
+	 * Word 1 Bit 31 in common service parameter is overloaded.
+	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
+	 * Word 1 Bit 31 in FLOGI response is clean address bit
+	 *
+	 * If fabric parameter is changed and clean address bit is
+	 * cleared delay nport discovery if
+	 * - vport->fc_prevDID != 0 (not initial discovery) OR
+	 * - lpfc_delay_discovery module parameter is set.
+	 */
+	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_DISC_DELAYED;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	return fabric_param_changed;
+}
+
+
+/**
+ * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into a fabric
+ * port in a fabric topology. It properly sets up the parameters to the @ndlp
+ * from the IOCB response. It also checks the N_Port ID newly assigned to
+ * the @vport against the previously assigned N_Port ID. If it differs from
+ * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
+ * is invoked on all the remaining nodes with the @vport to unregister the
+ * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ *   0 - Success (currently, always return 0)
+ **/
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   struct serv_parm *sp, IOCB_t *irsp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_nodelist *np;
+	struct lpfc_nodelist *next_np;
+	uint8_t fabric_param_changed;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_FABRIC;
+	spin_unlock_irq(shost->host_lock);
+
+	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
+	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
+		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+
+	phba->fc_edtovResol = sp->cmn.edtovResolution;
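+	/* R_A_TOV is reported in milliseconds; round up to whole seconds */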
+	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
+
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PUBLIC_LOOP;
+		spin_unlock_irq(shost->host_lock);
+	}
+
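+	/* The fabric-assigned N_Port ID arrives in word 4 of the response */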
+	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
+	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
+	ndlp->nlp_class_sup = 0;
+	if (sp->cls1.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+	if (sp->cls2.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS2;
+	if (sp->cls3.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS3;
+	if (sp->cls4.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS4;
+	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+				sp->cmn.bbRcvSizeLsb;
+
+	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+	if (fabric_param_changed) {
+		/* Reset FDMI attribute masks based on config parameter */
+		if (phba->cfg_enable_SmartSAN ||
+		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
+			/* Setup appropriate attribute masks */
+			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
+			if (phba->cfg_enable_SmartSAN)
+				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
+			else
+				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+		} else {
+			vport->fdmi_hba_mask = 0;
+			vport->fdmi_port_mask = 0;
+		}
+
+	}
+	memcpy(&vport->fabric_portname, &sp->portName,
+			sizeof(struct lpfc_name));
+	memcpy(&vport->fabric_nodename, &sp->nodeName,
+			sizeof(struct lpfc_name));
+	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+		if (sp->cmn.response_multiple_NPort) {
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_ELS | LOG_VPORT,
+					 "1816 FLOGI NPIV supported, "
+					 "response data 0x%x\n",
+					 sp->cmn.response_multiple_NPort);
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			/* Because we asked f/w for NPIV it still expects us
+			 * to call reg_vnpid at least for the physical host
+			 */
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_ELS | LOG_VPORT,
+					 "1817 Fabric does not support NPIV "
+					 "- configuring single port mode.\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+			spin_unlock_irq(&phba->hbalock);
+		}
+	}
+
+	/*
+	 * For FC we need to do some special processing because of the SLI
+	 * Port's default settings of the Common Service Parameters.
+	 */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
+		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+		if (fabric_param_changed)
+			lpfc_unregister_fcf_prep(phba);
+
+		/* This should just update the VFI CSPs */
+		if (vport->fc_flag & FC_VFI_REGISTERED)
+			lpfc_issue_reg_vfi(vport);
+	}
+
+	if (fabric_param_changed &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+
+		/* If our NportID changed, we need to ensure all
+		 * remaining NPORTs get unreg_login'ed.
+		 */
+		list_for_each_entry_safe(np, next_np,
+					&vport->fc_nodes, nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(np))
+				continue;
+			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+				   !(np->nlp_flag & NLP_NPR_ADISC))
+				continue;
+			spin_lock_irq(shost->host_lock);
+			np->nlp_flag &= ~NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_unreg_rpi(vport, np);
+		}
+		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			lpfc_sli4_unreg_all_rpis(vport);
+			lpfc_mbx_unreg_vpi(vport);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			spin_unlock_irq(shost->host_lock);
+		}
+
+		/*
+		 * For SLI3 and SLI4, the VPI needs to be reregistered in
+		 * response to this fabric parameter change event.
+		 */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
+	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+			/*
+			 * Driver needs to re-reg VPI in order for f/w
+			 * to update the MAC address.
+			 */
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+			lpfc_register_new_vport(phba, vport, ndlp);
+			return 0;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+			lpfc_register_new_vport(phba, vport, ndlp);
+		else
+			lpfc_issue_fabric_reglogin(vport);
+	} else {
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
+			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		} else if (vport->fc_flag & FC_VFI_REGISTERED)
+			lpfc_issue_init_vpi(vport);
+		else {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					"3135 Need register VFI: (x%x/%x)\n",
+					vport->fc_prevDID, vport->fc_myDID);
+			lpfc_issue_reg_vfi(vport);
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
+ * in a point-to-point topology. First, the @vport's N_Port Name is compared
+ * with the received N_Port Name: if the @vport's N_Port Name is greater than
+ * the received N_Port Name lexicographically, this node shall assign local
+ * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
+ * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
+ * this node shall just wait for the remote node to issue PLOGI and assign
+ * N_Port IDs.
+ *
+ * Return code
+ *   0 - Success
+ *   -ENXIO - Fail
+ **/
+static int
+lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  struct serv_parm *sp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+	vport->fc_flag |= FC_PT2PT;
+	spin_unlock_irq(shost->host_lock);
+
+	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+		lpfc_unregister_fcf_prep(phba);
+
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_VFI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+		phba->fc_topology_changed = 0;
+	}
+
+	rc = memcmp(&vport->fc_portname, &sp->portName,
+		    sizeof(vport->fc_portname));
+
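+	/* A non-negative memcmp means our WWPN is greater than or equal to
+	 * the remote WWPN, so this side assigns the N_Port IDs and initiates
+	 * the PLOGI; otherwise we wait for the remote port's PLOGI.
+	 */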
+	if (rc >= 0) {
+		/* This side will initiate the PLOGI */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PT2PT_PLOGI;
+		spin_unlock_irq(shost->host_lock);
+
+		/*
+		 * N_Port ID cannot be 0, set our Id to LocalID
+		 * the other side will be RemoteID.
+		 */
+
+		/* not equal */
+		if (rc)
+			vport->fc_myDID = PT2PT_LocalID;
+
+		/* Decrement ndlp reference count indicating that ndlp can be
+		 * safely released when other references to it are done.
+		 */
+		lpfc_nlp_put(ndlp);
+
+		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
+		if (!ndlp) {
+			/*
+			 * Cannot find existing Fabric ndlp, so allocate a
+			 * new one
+			 */
+			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
+			if (!ndlp)
+				goto fail;
+		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+			if (!ndlp)
+				goto fail;
+		}
+
+		memcpy(&ndlp->nlp_portname, &sp->portName,
+		       sizeof(struct lpfc_name));
+		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
+		       sizeof(struct lpfc_name));
+		/* Set state will put ndlp onto node list if not already done */
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+	} else
+		/* This side will wait for the PLOGI, decrement ndlp reference
+		 * count indicating that ndlp can be released when other
+		 * references to it are done.
+		 */
+		lpfc_nlp_put(ndlp);
+
+	/* If we are pt2pt with another NPort, force NPIV off! */
+	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		goto fail;
+
+	lpfc_config_link(phba, mbox);
+
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+	mbox->vport = vport;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return -ENXIO;
+}
+
+/**
+ * lpfc_cmpl_els_flogi - Completion callback function for flogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the top-level completion callback function for issuing
+ * a Fabric Login (FLOGI) command. If the response IOCB reported error,
+ * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
+ * retry has been made (either immediately or delayed with lpfc_els_retry()
+ * returning 1), the command IOCB will be released and function returned.
+ * If the retry attempt has been given up (possibly reach the maximum
+ * number of retries), one additional decrement of ndlp reference shall be
+ * invoked before going out after releasing the command IOCB. This will
+ * actually release the remote node (Note, lpfc_els_free_iocb() will also
+ * invoke one decrement of ndlp reference count). If no error reported in
+ * the IOCB status, the command Port ID field is used to determine whether
+ * this is a point-to-point topology or a fabric topology: if the Port ID
+ * field is assigned, it is a fabric topology; otherwise, it is a
+ * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
+ * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
+ * specific topology completion conditions.
+ **/
+static void
+lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_nodelist *ndlp = cmdiocb->context1;
+	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+	struct serv_parm *sp;
+	uint16_t fcf_index;
+	int rc;
+
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		/* One additional decrement on node reference count to
+		 * trigger the release of the node
+		 */
+		lpfc_nlp_put(ndlp);
+		goto out;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"FLOGI cmpl:      status:x%x/x%x state:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		vport->port_state);
+
+	if (irsp->ulpStatus) {
+		/*
+		 * In case of FIP mode, perform roundrobin FCF failover
+		 * due to new FCF discovery
+		 */
+		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			if (phba->link_state < LPFC_LINK_UP)
+				goto stop_rr_fcf_flogi;
+			if ((phba->fcoe_cvl_eventtag_attn ==
+			     phba->fcoe_cvl_eventtag) &&
+			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			    IOERR_SLI_ABORTED))
+				goto stop_rr_fcf_flogi;
+			else
+				phba->fcoe_cvl_eventtag_attn =
+					phba->fcoe_cvl_eventtag;
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2611 FLOGI failed on FCF (x%x), "
+					"status:x%x/x%x, tmo:x%x, perform "
+					"roundrobin FCF failover\n",
+					phba->fcf.current_rec.fcf_indx,
+					irsp->ulpStatus, irsp->un.ulpWord[4],
+					irsp->ulpTimeout);
+			lpfc_sli4_set_fcf_flogi_fail(phba,
+					phba->fcf.current_rec.fcf_indx);
+			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+			if (rc)
+				goto out;
+		}
+
+stop_rr_fcf_flogi:
+		/* FLOGI failure */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"2858 FLOGI failure Status:x%x/x%x TMO:x%x "
+				"Data x%x x%x\n",
+				irsp->ulpStatus, irsp->un.ulpWord[4],
+				irsp->ulpTimeout, phba->hba_flag,
+				phba->fcf.fcf_flag);
+
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+			goto out;
+
+		/* FLOGI failure */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpTimeout);
+
+		/* If this is not a loop open failure, bail out */
+		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+					IOERR_LOOP_OPEN_FAILURE)))
+			goto flogifail;
+
+		/* FLOGI failed, so there is no fabric */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+
+		/* If private loop, then allow max outstanding els to be
+		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
+		 * alpa map would take too long otherwise.
+		 */
+		if (phba->alpa_map[0] == 0)
+			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+		     (vport->fc_prevDID != vport->fc_myDID) ||
+			phba->fc_topology_changed)) {
+			if (vport->fc_flag & FC_VFI_REGISTERED) {
+				if (phba->fc_topology_changed) {
+					lpfc_unregister_fcf_prep(phba);
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_VFI_REGISTERED;
+					spin_unlock_irq(shost->host_lock);
+					phba->fc_topology_changed = 0;
+				} else {
+					lpfc_sli4_unreg_all_rpis(vport);
+				}
+			}
+
+			/* Do not register VFI if the driver aborted FLOGI */
+			if (!lpfc_error_lost_link(irsp))
+				lpfc_issue_reg_vfi(vport);
+			lpfc_nlp_put(ndlp);
+			goto out;
+		}
+		goto flogifail;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	/*
+	 * The FLOGI succeeded.  Sync the data for the CPU before
+	 * accessing it.
+	 */
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
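+	/* The service parameters start right after the 4-byte ELS command
+	 * code in the FLOGI response payload.
+	 */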
+	sp = prsp->virt + sizeof(uint32_t);
+
+	/* FLOGI completes successfully */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0101 FLOGI completes successfully, I/O tag:x%x, "
+			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
+			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+			 vport->port_state, vport->fc_flag);
+
+	if (vport->port_state == LPFC_FLOGI) {
+		/*
+		 * If Common Service Parameters indicate Nport
+		 * we are point to point, if Fport we are Fabric.
+		 */
+		if (sp->cmn.fPort)
+			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+		else if (!(phba->hba_flag & HBA_FCOE_MODE))
+			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
+		else {
+			lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_FIP | LOG_ELS,
+				"2831 FLOGI response with cleared Fabric "
+				"bit fcf_index 0x%x "
+				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
+				"Fabric Name "
+				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
+				phba->fcf.current_rec.fcf_indx,
+				phba->fcf.current_rec.switch_name[0],
+				phba->fcf.current_rec.switch_name[1],
+				phba->fcf.current_rec.switch_name[2],
+				phba->fcf.current_rec.switch_name[3],
+				phba->fcf.current_rec.switch_name[4],
+				phba->fcf.current_rec.switch_name[5],
+				phba->fcf.current_rec.switch_name[6],
+				phba->fcf.current_rec.switch_name[7],
+				phba->fcf.current_rec.fabric_name[0],
+				phba->fcf.current_rec.fabric_name[1],
+				phba->fcf.current_rec.fabric_name[2],
+				phba->fcf.current_rec.fabric_name[3],
+				phba->fcf.current_rec.fabric_name[4],
+				phba->fcf.current_rec.fabric_name[5],
+				phba->fcf.current_rec.fabric_name[6],
+				phba->fcf.current_rec.fabric_name[7]);
+			lpfc_nlp_put(ndlp);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
+			spin_unlock_irq(&phba->hbalock);
+			phba->fcf.fcf_redisc_attempted = 0; /* reset */
+			goto out;
+		}
+		if (!rc) {
+			/* Mark the FCF discovery process done */
+			if (phba->hba_flag & HBA_FIP_SUPPORT)
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
+						LOG_ELS,
+						"2769 FLOGI to FCF (x%x) "
+						"completed successfully\n",
+						phba->fcf.current_rec.fcf_indx);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
+			spin_unlock_irq(&phba->hbalock);
+			phba->fcf.fcf_redisc_attempted = 0; /* reset */
+			goto out;
+		}
+	}
+
+flogifail:
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_nlp_put(ndlp);
+
+	if (!lpfc_error_lost_link(irsp)) {
+		/* FLOGI failed, so just use loop map to make discovery list */
+		lpfc_disc_list_loopmap(vport);
+
+		/* Start discovery */
+		lpfc_disc_start(vport);
+	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			 IOERR_SLI_ABORTED) &&
+			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			 IOERR_SLI_DOWN))) &&
+			(phba->link_state != LPFC_CLEAR_LA)) {
+		/* If FLOGI failed enable link interrupt. */
+		lpfc_issue_clear_la(phba, vport);
+	}
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fabric Login (FLOGI) Request ELS command
+ * for a @vport. The initiator service parameters are put into the payload
+ * of the FLOGI Request IOCB and the top-level callback function pointer
+ * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
+ * function field. The lpfc_issue_fabric_iocb routine is invoked to send
+ * out FLOGI ELS command with one outstanding fabric IOCB at a time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FLOGI ELS command.
+ *
+ * Return code
+ *   0 - successfully issued flogi iocb for @vport
+ *   1 - failed to issue flogi iocb for @vport
+ **/
+static int
+lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct serv_parm *sp;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	uint32_t tmo;
+	int rc;
+
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_FLOGI);
+
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For FLOGI request, remainder of payload is service parameters */
+	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+	sp = (struct serv_parm *) pcmd;
+
+	/* Setup CSPs accordingly for Fabric */
+	sp->cmn.e_d_tov = 0;
+	sp->cmn.w2.r_a_tov = 0;
+	sp->cmn.virtual_fabric_support = 0;
+	sp->cls1.classValid = 0;
+	if (sp->cmn.fcphLow < FC_PH3)
+		sp->cmn.fcphLow = FC_PH3;
+	if (sp->cmn.fcphHigh < FC_PH3)
+		sp->cmn.fcphHigh = FC_PH3;
+
+	if  (phba->sli_rev == LPFC_SLI_REV4) {
+		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+		    LPFC_SLI_INTF_IF_TYPE_0) {
+			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+			/* FLOGI needs to be 3 for WQE FCFI */
+			/* Set the fcfi to the fcfi we registered with */
+			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+		}
+		/* Can't do SLI4 class2 without sequence coalescing support */
+		sp->cls2.classValid = 0;
+		sp->cls2.seqDelivery = 0;
+	} else {
+		/* Historical, setting sequential-delivery bit for SLI3 */
+		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
+		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+			sp->cmn.request_multiple_Nport = 1;
+			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+			icmd->ulpCt_h = 1;
+			icmd->ulpCt_l = 0;
+		} else
+			sp->cmn.request_multiple_Nport = 0;
+	}
+
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+		icmd->un.elsreq64.myID = 0;
+		icmd->un.elsreq64.fl = 1;
+	}
+
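+	/* Temporarily substitute the FLOGI discovery timeout for R_A_TOV so
+	 * that lpfc_set_disctmo() arms the discovery timer with
+	 * LPFC_DISC_FLOGI_TMO, then restore the saved value.
+	 */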
+	tmo = phba->fc_ratov;
+	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+	lpfc_set_disctmo(vport);
+	phba->fc_ratov = tmo;
+
+	phba->fc_stat.elsXmitFLOGI++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FLOGI:     opt:x%x",
+		phba->sli3_options, 0, 0);
+
+	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
+ * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
+ * list and issues an abort IOCB command on each outstanding IOCB that
+ * contains an active Fabric_DID ndlp. Note that this function is to issue
+ * the abort IOCB command on all the outstanding IOCBs, thus when this
+ * function returns, it does not guarantee all the IOCBs are actually aborted.
+ *
+ * Return code
+ *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
+ **/
+int
+lpfc_els_abort_flogi(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *iocb, *next_iocb;
+	struct lpfc_nodelist *ndlp;
+	IOCB_t *icmd;
+
+	/* Abort outstanding I/O on NPort <nlp_DID> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+			"0201 Abort outstanding I/O on NPort x%x\n",
+			Fabric_DID);
+
+	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return -EIO;
+
+	/*
+	 * Check the txcmplq for an iocb that matches the nport the driver is
+	 * searching for.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		icmd = &iocb->iocb;
+		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+			ndlp = (struct lpfc_nodelist *)(iocb->context1);
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+			    (ndlp->nlp_DID == Fabric_DID))
+				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return 0;
+}
+
+/**
+ * lpfc_initial_flogi - Issue an initial fabric login for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Login (FLOGI) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
+ * is then invoked with the @vport and the ndlp to perform the FLOGI for the
+ * @vport.
+ *
+ * Return code
+ *   0 - failed to issue initial flogi for @vport
+ *   1 - successfully issued initial flogi for @vport
+ **/
+int
+lpfc_initial_flogi(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	vport->port_state = LPFC_FLOGI;
+	lpfc_set_disctmo(vport);
+
+	/* First look for the Fabric ndlp */
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = lpfc_nlp_init(vport, Fabric_DID);
+		if (!ndlp)
+			return 0;
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 0;
+	}
+
+	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
+		/* This decrement of reference count to node shall kick off
+		 * the release of the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Discover (FDISC) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
+ * is then invoked with the @vport and the ndlp to perform the FDISC for the
+ * @vport.
+ *
+ * Return code
+ *   0 - failed to issue initial fdisc for @vport
+ *   1 - successfully issued initial fdisc for @vport
+ **/
+int
+lpfc_initial_fdisc(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	/* First look for the Fabric ndlp */
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = lpfc_nlp_init(vport, Fabric_DID);
+		if (!ndlp)
+			return 0;
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 0;
+	}
+
+	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+		/* decrement node reference count to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * lpfc_more_plogi - Check and issue remaining plogis for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there are more remaining Port Logins
+ * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
+ * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
+ * to issue ELS PLOGIs up to the configured discover threads with the
+ * @vport (@vport->cfg_discovery_threads). The function also decrements
+ * the @vport's num_disc_nodes by 1 if it is not already 0.
+ **/
+void
+lpfc_more_plogi(struct lpfc_vport *vport)
+{
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
+
+	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0232 Continue discovery with %d PLOGIs to go "
+			 "Data: x%x x%x x%x\n",
+			 vport->num_disc_nodes, vport->fc_plogi_cnt,
+			 vport->fc_flag, vport->port_state);
+	/* Check to see if there are more PLOGIs to be sent */
+	if (vport->fc_flag & FC_NLP_MORE)
+		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
+		lpfc_els_disc_plogi(vport);
+
+	return;
+}
+
+/**
+ * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
+ * @phba: pointer to lpfc hba data structure.
+ * @prsp: pointer to response IOCB payload.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine checks and indicates whether the WWPN of an N_Port, retrieved
+ * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
+ * The following cases are considered N_Port confirmed:
+ * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
+ * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
+ * it does not have WWPN assigned either. If the WWPN is confirmed, the
+ * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
+ * 1) if there is a node on vport list other than the @ndlp with the same
+ * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
+ * on that node to release the RPI associated with the node; 2) if there is
+ * no node found on vport list with the same WWPN of the N_Port PLOGI logged
+ * into, a new node shall be allocated (or activated). In either case, the
+ * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
+ * be released and the new_ndlp shall be put on to the vport node list and
+ * its pointer returned as the confirmed node.
+ *
+ * Note that before the @ndlp got "released", the keepDID from not-matching
+ * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
+ * of the @ndlp. This is because the release of @ndlp is actually to put it
+ * into an inactive state on the vport node list and the vport node list
+ * management algorithm does not allow two nodes with the same DID.
+ *
+ * Return code
+ *   pointer to the PLOGI N_Port @ndlp
+ **/
+static struct lpfc_nodelist *
+lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+			 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport *vport = ndlp->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *new_ndlp;
+	struct lpfc_rport_data *rdata;
+	struct fc_rport *rport;
+	struct serv_parm *sp;
+	uint8_t  name[sizeof(struct lpfc_name)];
+	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
+	uint16_t keep_nlp_state;
+	struct lpfc_nvme_rport *keep_nrport = NULL;
+	int  put_node;
+	int  put_rport;
+	unsigned long *active_rrqs_xri_bitmap = NULL;
+
+	/* Fabric nodes can have the same WWPN so we don't bother searching
+	 * by WWPN.  Just return the ndlp that was given to us.
+	 */
+	if (ndlp->nlp_type & NLP_FABRIC)
+		return ndlp;
+
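+	/* The serv_parm payload follows the 4-byte ELS command code in the
+	 * PLOGI response buffer.
+	 */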
+	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
+	memset(name, 0, sizeof(struct lpfc_name));
+
+	/* Now we find out if the NPort we are logging into, matches the WWPN
+	 * we have for that ndlp. If not, we have some work to do.
+	 */
+	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
+
+	/* return immediately if the WWPN matches ndlp */
+	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
+		return ndlp;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
+						       GFP_KERNEL);
+		if (active_rrqs_xri_bitmap)
+			memset(active_rrqs_xri_bitmap, 0,
+			       phba->cfg_rrq_xri_bitmap_sz);
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
+			 "new_ndlp x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag,  ndlp->nlp_fc4_type,
+			 (new_ndlp ? new_ndlp->nlp_DID : 0),
+			 (new_ndlp ? new_ndlp->nlp_flag : 0),
+			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
+
+	if (!new_ndlp) {
+		rc = memcmp(&ndlp->nlp_portname, name,
+			    sizeof(struct lpfc_name));
+		if (!rc) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
+			return ndlp;
+		}
+		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
+		if (!new_ndlp) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
+			return ndlp;
+		}
+	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
+		rc = memcmp(&ndlp->nlp_portname, name,
+			    sizeof(struct lpfc_name));
+		if (!rc) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
+			return ndlp;
+		}
+		new_ndlp = lpfc_enable_node(vport, new_ndlp,
+						NLP_STE_UNUSED_NODE);
+		if (!new_ndlp) {
+			if (active_rrqs_xri_bitmap)
+				mempool_free(active_rrqs_xri_bitmap,
+					     phba->active_rrq_pool);
+			return ndlp;
+		}
+		keepDID = new_ndlp->nlp_DID;
+		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
+			memcpy(active_rrqs_xri_bitmap,
+			       new_ndlp->active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
+	} else {
+		keepDID = new_ndlp->nlp_DID;
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(active_rrqs_xri_bitmap,
+			       new_ndlp->active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
+	}
+
+	/* At this point in this routine, we know new_ndlp will be
+	 * returned. however, any previous GID_FTs that were done
+	 * would have updated nlp_fc4_type in ndlp, so we must ensure
+	 * new_ndlp has the right value.
+	 */
+	if (vport->fc_flag & FC_FABRIC)
+		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
+
+	lpfc_unreg_rpi(vport, new_ndlp);
+	new_ndlp->nlp_DID = ndlp->nlp_DID;
+	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		memcpy(new_ndlp->active_rrqs_xri_bitmap,
+		       ndlp->active_rrqs_xri_bitmap,
+		       phba->cfg_rrq_xri_bitmap_sz);
+
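+	/* Swap nlp_flag between the two nodes: new_ndlp inherits the login
+	 * state of the node we actually logged into, while ndlp keeps the
+	 * flags new_ndlp had.
+	 */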
+	spin_lock_irq(shost->host_lock);
+	keep_nlp_flag = new_ndlp->nlp_flag;
+	new_ndlp->nlp_flag = ndlp->nlp_flag;
+	ndlp->nlp_flag = keep_nlp_flag;
+	spin_unlock_irq(shost->host_lock);
+
+	/* Set nlp_states accordingly */
+	keep_nlp_state = new_ndlp->nlp_state;
+	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
+
+	/* interchange the nvme remoteport structs */
+	keep_nrport = new_ndlp->nrport;
+	new_ndlp->nrport = ndlp->nrport;
+
+	/* Move this back to NPR state */
+	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
+		/* The new_ndlp is replacing ndlp totally, so we need
+		 * to put ndlp on UNUSED list and try to free it.
+		 */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3179 PLOGI confirm NEW: %x %x\n",
+			 new_ndlp->nlp_DID, keepDID);
+
+		/* Fix up the rport accordingly */
+		rport =  ndlp->rport;
+		if (rport) {
+			rdata = rport->dd_data;
+			if (rdata->pnode == ndlp) {
+				/* break the link before dropping the ref */
+				ndlp->rport = NULL;
+				lpfc_nlp_put(ndlp);
+				rdata->pnode = lpfc_nlp_get(new_ndlp);
+				new_ndlp->rport = rport;
+			}
+			new_ndlp->nlp_type = ndlp->nlp_type;
+		}
+
+		/* Fix up the nvme rport */
+		if (ndlp->nrport) {
+			ndlp->nrport = NULL;
+			lpfc_nlp_put(ndlp);
+		}
+
+		/* We shall actually free the ndlp when both its nlp_DID and
+		 * nlp_portname fields are 0, to avoid leaving an unusable
+		 * ndlp on the nodelist.
+		 */
+		if (ndlp->nlp_DID == 0) {
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+		}
+
+		/* Two ndlps cannot have the same did on the nodelist */
+		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
+
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			lpfc_drop_node(vport, ndlp);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3180 PLOGI confirm SWAP: %x %x\n",
+			 new_ndlp->nlp_DID, keepDID);
+
+		lpfc_unreg_rpi(vport, ndlp);
+
+		/* Two ndlps cannot have the same did */
+		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    active_rrqs_xri_bitmap)
+			memcpy(ndlp->active_rrqs_xri_bitmap,
+			       active_rrqs_xri_bitmap,
+			       phba->cfg_rrq_xri_bitmap_sz);
+
+		/* Since we are switching over to the new_ndlp,
+		 * reset the old ndlp state
+		 */
+		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+			keep_nlp_state = NLP_STE_NPR_NODE;
+		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
+
+		/* Previous ndlp no longer active with nvme host transport.
+		 * Remove reference from earlier registration unless the
+		 * nvme host took care of it.
+		 */
+		if (ndlp->nrport)
+			lpfc_nlp_put(ndlp);
+		ndlp->nrport = keep_nrport;
+
+		/* Fix up the rport accordingly */
+		rport = ndlp->rport;
+		if (rport) {
+			rdata = rport->dd_data;
+			put_node = rdata->pnode != NULL;
+			put_rport = ndlp->rport != NULL;
+			rdata->pnode = NULL;
+			ndlp->rport = NULL;
+			if (put_node)
+				lpfc_nlp_put(ndlp);
+			if (put_rport)
+				put_device(&rport->dev);
+		}
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    active_rrqs_xri_bitmap)
+		mempool_free(active_rrqs_xri_bitmap,
+			     phba->active_rrq_pool);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
+			 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
+			 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
+			 new_ndlp->nlp_fc4_type);
+
+	return new_ndlp;
+}
+
+/**
+ * lpfc_end_rscn - Check and handle more rscn for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether more Registration State Change
+ * Notifications (RSCNs) came in while the discovery state machine was in
+ * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
+ * invoked to handle the additional RSCNs for the @vport. Otherwise, the
+ * FC_RSCN_MODE bit will be cleared on the @vport to mark the end of
+ * handling the RSCNs.
+ **/
+void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (vport->fc_flag & FC_RSCN_MODE) {
+		/*
+		 * Check to see if more RSCNs came in while we were
+		 * processing this one.
+		 */
+		if (vport->fc_rscn_id_cnt ||
+		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+			lpfc_els_handle_rscn(vport);
+		else {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
+		}
+	}
+}
+
+/**
+ * lpfc_cmpl_els_rrq - Completion handler for ELS RRQs.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine will call the clear rrq function to free the rrq and
+ * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
+ * exist then the clear_rrq is still called because the rrq needs to
+ * be freed.
+ **/
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_node_rrq *rrq;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	rrq = cmdiocb->context_un.rrq;
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"RRQ cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2882 RRQ completes to NPort x%x "
+				 "with no ndlp. Data: x%x x%x x%x\n",
+				 irsp->un.elsreq64.remoteID,
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpIoTag);
+		goto out;
+	}
+
+	/* rrq completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2880 RRQ completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		/* RRQ failed. Don't print vport-to-vport rejects. */
+		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+			(phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+	}
+out:
+	if (rrq)
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_cmpl_els_plogi - Completion callback function for plogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Port
+ * Login (PLOGI) command. For PLOGI completion, there must be an active
+ * ndlp on the vport node list that matches the remote node ID from the
+ * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
+ * ignored and the command IOCB released. The PLOGI response IOCB status is
+ * checked for error conditions. If an error status is reported, PLOGI
+ * retry shall be attempted by invoking the lpfc_els_retry() routine.
+ * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
+ * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
+ * (DSM) is set for this PLOGI completion. Finally, it checks whether
+ * there are additional N_Port nodes with the vport that need to perform
+ * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
+ * PLOGIs.
+ **/
+static void
+lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *prsp;
+	int disc, rc;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"PLOGI cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0136 PLOGI completes to NPort x%x "
+				 "with no ndlp. Data: x%x x%x x%x\n",
+				 irsp->un.elsreq64.remoteID,
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpIoTag);
+		goto out;
+	}
+
+	/* Since ndlp can be freed in the disc state machine, note if this node
+	 * is being used during discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	spin_unlock_irq(shost->host_lock);
+	rc   = 0;
+
+	/* PLOGI completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0102 PLOGI completes to NPort x%06x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_fc4_type,
+			 irsp->ulpStatus, irsp->un.ulpWord[4],
+			 disc, vport->num_disc_nodes);
+
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		goto out;
+	}
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			if (disc) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+				spin_unlock_irq(shost->host_lock);
+			}
+			goto out;
+		}
+		/* PLOGI failed. Don't print vport-to-vport rejects. */
+		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+			(phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (lpfc_error_lost_link(irsp))
+			rc = NLP_STE_FREED_NODE;
+		else
+			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						     NLP_EVT_CMPL_PLOGI);
+	} else {
+		/* Good status, call state machine */
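+		/* The response payload sits in the dmabuf chained after the
+		 * command buffer on the context2 buffer list.
+		 */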
+		prsp = list_entry(((struct lpfc_dmabuf *)
+				   cmdiocb->context2)->list.next,
+				  struct lpfc_dmabuf, list);
+		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					     NLP_EVT_CMPL_PLOGI);
+	}
+
+	if (disc && vport->num_disc_nodes) {
+		/* Check to see if there are more PLOGIs to be sent */
+		lpfc_more_plogi(vport);
+
+		if (vport->num_disc_nodes == 0) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_NDISC_ACTIVE;
+			spin_unlock_irq(shost->host_lock);
+
+			lpfc_can_disctmo(vport);
+			lpfc_end_rscn(vport);
+		}
+	}
+
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: destination port identifier.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Port Login (PLOGI) command to a remote N_Port
+ * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+ * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
+ * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PLOGI ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued a plogi for @vport
+ *   1 - failed to issue a plogi for @vport
+ **/
+int
+lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct Scsi_Host *shost;
+	struct serv_parm *sp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int ret;
+
+	ndlp = lpfc_findnode_did(vport, did);
+	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
+		ndlp = NULL;
+
+	/* If ndlp is not NULL, we will bump the reference count on it */
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+				     ELS_CMD_PLOGI);
+	if (!elsiocb)
+		return 1;
+
+	shost = lpfc_shost_from_vport(vport);
+	/* ndlp may be NULL if no node was found for @did above */
+	if (ndlp) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For PLOGI request, remainder of payload is service parameters */
+	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+	sp = (struct serv_parm *) pcmd;
+
+	/*
+	 * If we are an N_Port connected to a Fabric, fix up parameters so
+	 * logins to devices on remote loops work.
+	 */
+	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+		sp->cmn.altBbCredit = 1;
+
+	if (sp->cmn.fcphLow < FC_PH_4_3)
+		sp->cmn.fcphLow = FC_PH_4_3;
+
+	if (sp->cmn.fcphHigh < FC_PH3)
+		sp->cmn.fcphHigh = FC_PH3;
+
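+	/* Default the vendor-specific CSP fields to zero (they are filled in
+	 * below only when the firmware supports response suppression) and
+	 * keep just the 4 valid bits of the BB receive-size MSB.
+	 */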
+	sp->cmn.valid_vendor_ver_level = 0;
+	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
+	sp->cmn.bbRcvSizeMsb &= 0xF;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue PLOGI:     did:x%x",
+		did, 0, 0);
+
+	/* If our firmware supports this feature, convey that
+	 * information to the target using the vendor specific field.
+	 */
+	if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
+		sp->cmn.valid_vendor_ver_level = 1;
+		sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
+		sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
+	}
+
+	phba->fc_stat.elsXmitPLOGI++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_prli - Completion callback function for prli
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for a Process Login
+ * (PRLI) ELS command. The PRLI response IOCB status is checked for error
+ * status. If there is error status reported, PRLI retry shall be attempted
+ * by invoking the lpfc_els_retry() routine. Otherwise, the state
+ * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
+ * ndlp to mark the PRLI completion.
+ **/
+static void
+lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &(rspiocb->iocb);
+	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+	/* Driver supports multiple FC4 types.  Counters matter. */
+	vport->fc_prli_sent--;
+	ndlp->fc4_prli_sent--;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"PRLI cmpl:       status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+
+	/* PRLI completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0103 PRLI completes to NPort x%06x "
+			 "Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 vport->num_disc_nodes, ndlp->fc4_prli_sent);
+
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport))
+		goto out;
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			goto out;
+		}
+
+		/* PRLI failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
+				 "data: x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (lpfc_error_lost_link(irsp))
+			goto out;
+		else
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_PRLI);
+	} else
+		/* Good status, call state machine.  However, if another
+		 * PRLI is outstanding, don't call the state machine
+		 * because final disposition to Mapped or Unmapped is
+		 * completed there.
+		 */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_PRLI);
+
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_prli - Issue a prli iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Process Login (PRLI) ELS command for the
+ * @vport. The PRLI service parameters are set up in the payload of the
+ * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
+ * is put to the IOCB completion callback func field before invoking the
+ * routine lpfc_sli_issue_iocb() to send out PRLI command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI ELS command.
+ *
+ * Return code
+ *   0 - successfully issued prli iocb command for @vport
+ *   1 - failed to issue prli iocb command for @vport
+ **/
+int
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		    uint8_t retry)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
+	PRLI *npr;
+	struct lpfc_nvme_prli *npr_nvme;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	u32 local_nlp_type, elscmd;
+
+	/*
+	 * If we are in RSCN mode, the FC4 types supported from a
+	 * previous GFT_ID command may not be accurate. So, if we
+	 * are an NVME Initiator, always look for the possibility of
+	 * the remote NPort being an NVME Target.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    vport->fc_flag & FC_RSCN_MODE &&
+	    vport->nvmei_support)
+		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+	local_nlp_type = ndlp->nlp_fc4_type;
+
+	/* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+	 * fields here before any of them can complete.
+	 */
+	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+	ndlp->nvme_fb_size = 0;
+
+ send_next_prli:
+	if (local_nlp_type & NLP_FC4_FCP) {
+		/* Payload is 4 + 16 = 20 (0x14) bytes. */
+		cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+		elscmd = ELS_CMD_PRLI;
+	} else if (local_nlp_type & NLP_FC4_NVME) {
+		/* Payload is 4 + 20 = 24 (0x18) bytes. */
+		cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
+		elscmd = ELS_CMD_NVMEPRLI;
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
+				 ndlp->nlp_fc4_type, ndlp->nlp_DID);
+		return 1;
+	}
+
+	/* SLI3 ports don't support NVME.  If this rport is a strict NVME
+	 * FC4 type, implicitly LOGO.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV3 &&
+	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
+				 ndlp->nlp_type);
+		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+		return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, elscmd);
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For PRLI request, remainder of payload is service parameters */
+	memset(pcmd, 0, cmdsize);
+
+	if (local_nlp_type & NLP_FC4_FCP) {
+		/* Remainder of payload is FCP PRLI parameter page.
+		 * Note: this data structure is defined as
+		 * BE/LE in the structure definition so no
+		 * byte swap call is made.
+		 */
+		*((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
+		pcmd += sizeof(uint32_t);
+		npr = (PRLI *)pcmd;
+
+		/*
+		 * If our firmware version is 3.20 or later,
+		 * set the following bits for FC-TAPE support.
+		 */
+		if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+			npr->ConfmComplAllowed = 1;
+			npr->Retry = 1;
+			npr->TaskRetryIdReq = 1;
+		}
+		npr->estabImagePair = 1;
+		npr->readXferRdyDis = 1;
+		if (vport->cfg_first_burst_size)
+			npr->writeXferRdyDis = 1;
+
+		/* For FCP support */
+		npr->prliType = PRLI_FCP_TYPE;
+		npr->initiatorFunc = 1;
+		elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
+
+		/* Remove FCP type - processed. */
+		local_nlp_type &= ~NLP_FC4_FCP;
+	} else if (local_nlp_type & NLP_FC4_NVME) {
+		/* Remainder of payload is NVME PRLI parameter page.
+		 * This data structure is the newer definition that
+		 * uses bf macros so a byte swap is required.
+		 */
+		*((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
+		pcmd += sizeof(uint32_t);
+		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
+		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
+		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
+
+		/* Only initiators request first burst. */
+		if ((phba->cfg_nvme_enable_fb) &&
+		    !phba->nvmet_support)
+			bf_set(prli_fba, npr_nvme, 1);
+
+		if (phba->nvmet_support) {
+			bf_set(prli_tgt, npr_nvme, 1);
+			bf_set(prli_disc, npr_nvme, 1);
+
+		} else {
+			bf_set(prli_init, npr_nvme, 1);
+		}
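+		/* The NVME PRLI page was built with bf macros in host order;
+		 * swap words 1 and 4 to wire (big-endian) order.
+		 */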
+		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
+		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
+		elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
+
+		/* Remove NVME type - processed. */
+		local_nlp_type &= ~NLP_FC4_NVME;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue PRLI:      did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitPRLI++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_PRLI_SND;
+
+	/* The vport counters are used for lpfc_scan_finished, but
+	 * the ndlp is used to track outstanding PRLIs for different
+	 * FC4 types.
+	 */
+	vport->fc_prli_sent++;
+	ndlp->fc4_prli_sent++;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_PRLI_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+
+	/* The driver supports 2 FC4 types.  Make sure
+	 * a PRLI is issued for all types before exiting.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
+		goto send_next_prli;
+
+	return 0;
+}
+
+/**
+ * lpfc_rscn_disc - Perform rscn discovery for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine performs Registration State Change Notification (RSCN)
+ * discovery for a @vport. If the @vport's node port recovery count is not
+ * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
+ * the nodes that need recovery. If none of the PLOGI were needed through
+ * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
+ * invoked to check and handle any more RSCNs that came in during the period
+ * of processing the current ones.
+ **/
+static void
+lpfc_rscn_disc(struct lpfc_vport *vport)
+{
+	lpfc_can_disctmo(vport);
+
+	/* RSCN discovery */
+	/* go thru NPR nodes and issue ELS PLOGIs */
+	if (vport->fc_npr_cnt)
+		if (lpfc_els_disc_plogi(vport))
+			return;
+
+	lpfc_end_rscn(vport);
+}
+
+/**
+ * lpfc_adisc_done - Complete the adisc phase of discovery
+ * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
+ *
+ * This function is called when the final ADISC is completed during discovery.
+ * This function handles clearing link attention or issuing reg_vpi depending
+ * on whether npiv is enabled. This function also kicks off the PLOGI phase of
+ * discovery.
+ * This function is called with no locks held.
+ **/
+static void
+lpfc_adisc_done(struct lpfc_vport *vport)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+
+	/*
+	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
+	 * and continue discovery.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
+		/* The ADISCs are complete.  Doesn't matter if they
+		 * succeeded or failed because the ADISC completion
+		 * routine guarantees to call the state machine and
+		 * the RPI is either unregistered (failed ADISC response)
+		 * or the RPI is still valid and the node is marked
+		 * mapped for a target.  The exchanges should be in the
+		 * correct state. This code is specific to SLI3.
+		 */
+		lpfc_issue_clear_la(phba, vport);
+		lpfc_issue_reg_vpi(phba, vport);
+		return;
+	}
+	/*
+	 * For SLI2, we need to set port_state to READY
+	 * and continue discovery.
+	 */
+	if (vport->port_state < LPFC_VPORT_READY) {
+		/* If we get here, there is nothing to ADISC */
+		lpfc_issue_clear_la(phba, vport);
+		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+			vport->num_disc_nodes = 0;
+			/* go thru NPR list, issue ELS PLOGIs */
+			if (vport->fc_npr_cnt)
+				lpfc_els_disc_plogi(vport);
+			if (!vport->num_disc_nodes) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
+		vport->port_state = LPFC_VPORT_READY;
+	} else
+		lpfc_rscn_disc(vport);
+}
+
+/**
+ * lpfc_more_adisc - Issue more adisc as needed
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine determines whether more ndlps on a @vport's
+ * node list need to have Address Discover (ADISC) issued. If so, it will
+ * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
+ * remaining nodes which need to have ADISC sent.
+ **/
+void
+lpfc_more_adisc(struct lpfc_vport *vport)
+{
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
+	/* Continue discovery with <num_disc_nodes> ADISCs to go */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0210 Continue discovery with %d ADISCs to go "
+			 "Data: x%x x%x x%x\n",
+			 vport->num_disc_nodes, vport->fc_adisc_cnt,
+			 vport->fc_flag, vport->port_state);
+	/* Check to see if there are more ADISCs to be sent */
+	if (vport->fc_flag & FC_NLP_MORE) {
+		lpfc_set_disctmo(vport);
+		/* go thru NPR nodes and issue any remaining ELS ADISCs */
+		lpfc_els_disc_adisc(vport);
+	}
+	if (!vport->num_disc_nodes)
+		lpfc_adisc_done(vport);
+	return;
+}
+
+/**
+ * lpfc_cmpl_els_adisc - Completion callback function for adisc
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the Address Discover
+ * (ADISC) command. It first checks to see whether link went down during
+ * the discovery process. If so, the node will be marked as node port
+ * recovery for issuing discover IOCB by the link attention handler and
+ * exit. Otherwise, the response status is checked. If error was reported
+ * in the response status, the ADISC command shall be retried by invoking
+ * the lpfc_els_retry() routine. Otherwise, if no error was reported in
+ * the response status, the state machine is invoked to set transition
+ * with respect to NLP_EVT_CMPL_ADISC event.
+ **/
+static void
+lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	int  disc;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &(rspiocb->iocb);
+	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"ADISC cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+
+	/* Since ndlp can be freed in the disc state machine, note if this node
+	 * is being used during discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	/* ADISC completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0104 ADISC completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		goto out;
+	}
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			if (disc) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_set_disctmo(vport);
+			}
+			goto out;
+		}
+		/* ADISC failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (!lpfc_error_lost_link(irsp))
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_ADISC);
+	} else
+		/* Good status, call state machine */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_ADISC);
+
+	/* Check to see if there are more ADISCs to be sent */
+	if (disc && vport->num_disc_nodes)
+		lpfc_more_adisc(vport);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an Address Discover (ADISC) for an @ndlp on a
+ * @vport. It prepares the payload of the ADISC ELS command, updates the
+ * nlp_flag of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
+ * to issue the ADISC ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC ELS command.
+ *
+ * Return code
+ *   0 - successfully issued adisc
+ *   1 - failed to issue adisc
+ **/
+int
+lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     uint8_t retry)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	ADISC *ap;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+
+	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ADISC);
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For ADISC request, remainder of payload is service parameters */
+	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in ADISC payload */
+	ap = (ADISC *) pcmd;
+	ap->hardAL_PA = phba->fc_pref_ALPA;
+	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ap->DID = be32_to_cpu(vport->fc_myDID);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue ADISC:     did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitADISC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_ADISC_SND;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_ADISC_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_logo - Completion callback function for logo
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the ELS Logout (LOGO)
+ * command. If no error status was reported from the LOGO response, the
+ * state machine of the associated ndlp shall be invoked for transition with
+ * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
+ * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ **/
+static void
+lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfcMboxq *mbox;
+	unsigned long flags;
+	uint32_t skip_recovery = 0;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &(rspiocb->iocb);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_LOGO_SND;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"LOGO cmpl:       status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+
+	/* LOGO completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0105 LOGO completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, vport->num_disc_nodes);
+
+	if (lpfc_els_chk_latt(vport)) {
+		skip_recovery = 1;
+		goto out;
+	}
+
+	/* Check to see if the node was marked for target removal */
+	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+		/* NLP_EVT_DEVICE_RM should unregister the RPI
+		 * which should abort all outstanding IOs.
+		 */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_DEVICE_RM);
+		skip_recovery = 1;
+		goto out;
+	}
+
+	/* The LOGO will not be retried on failure.  A LOGO was
+	 * issued to the remote rport and an ACC, an RJT, or no answer is
+	 * acceptable.  Note the failure and move forward with
+	 * discovery.  The PLOGI will retry.
+	 */
+	if (irsp->ulpStatus) {
+		/* LOGO failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (lpfc_error_lost_link(irsp)) {
+			skip_recovery = 1;
+			goto out;
+		}
+	}
+
+	/* Call state machine. This will unregister the rpi if needed. */
+	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+	if ((vport->fc_flag & FC_PT2PT) &&
+		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+		phba->pport->fc_myDID = 0;
+
+		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if (phba->nvmet_support)
+				lpfc_nvmet_update_targetport(phba);
+			else
+				lpfc_nvme_update_localport(phba->pport);
+		}
+
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mbox) {
+			lpfc_config_link(phba, mbox);
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+				MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+				skip_recovery = 1;
+			}
+		}
+	}
+
+	/*
+	 * If the node is a target, the handling attempts to recover the port.
+	 * For any other port type, the rpi is unregistered as an implicit
+	 * LOGO.
+	 */
+	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+	    skip_recovery == 0) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		spin_lock_irqsave(shost->host_lock, flags);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3187 LOGO completes to NPort x%x: Start "
+				 "Recovery Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4], irsp->ulpTimeout,
+				 vport->num_disc_nodes);
+		lpfc_disc_start(vport);
+	}
+	return;
+}
+
+/**
+ * lpfc_issue_els_logo - Issue a logo to a node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine constructs and issues an ELS Logout (LOGO) iocb command
+ * to a remote node, referred by an @ndlp on a @vport. It constructs the
+ * payload of the IOCB, properly sets up the @ndlp state, and invokes the
+ * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Callers of this routine are expected to unregister the RPI first
+ *
+ * Return code
+ *   0 - successfully issued logo
+ *   1 - failed to issue logo
+ **/
+int
+lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		    uint8_t retry)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	spin_lock_irq(shost->host_lock);
+	if (ndlp->nlp_flag & NLP_LOGO_SND) {
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_LOGO);
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in LOGO payload */
+	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue LOGO:      did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitLOGO++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_SND;
+	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+	spin_unlock_irq(shost->host_lock);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_LOGO_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+	return 0;
+}
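+
+/* Usage note (illustrative, comment only): per the kernel-doc above,
+ * callers are expected to unregister the RPI before sending the LOGO.
+ * A minimal, hypothetical caller sketch:
+ *
+ *	lpfc_unreg_rpi(vport, ndlp);
+ *	if (lpfc_issue_els_logo(vport, ndlp, 0))
+ *		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ *				 "LOGO issue failed DID:x%x\n",
+ *				 ndlp->nlp_DID);
+ */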
+
+/**
+ * lpfc_cmpl_els_cmd - Completion callback function for generic els command
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for ELS commands.
+ * Specifically, it is the callback function which does not need to perform
+ * any command specific operations. It is currently used by the ELS command
+ * issuing routines for the ELS State Change Request (SCR),
+ * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
+ * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
+ * certain debug loggings, this callback function simply invokes the
+ * lpfc_els_chk_latt() routine to check whether link went down during the
+ * discovery process.
+ **/
+static void
+lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+
+	irsp = &rspiocb->iocb;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+	/* ELS cmd tag <ulpIoTag> completes */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+			 irsp->ulpIoTag, irsp->ulpStatus,
+			 irsp->un.ulpWord[4], irsp->ulpTimeout);
+	/* Check to see if link went down during discovery */
+	lpfc_els_chk_latt(vport);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_scr - Issue a scr to a node on a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a State Change Request (SCR) to a fabric node
+ * on a @vport. The remote node @nportid is passed into the function. It
+ * first searches the @vport node list to find the matching ndlp. If no such
+ * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
+ * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
+ * routine is invoked to send the SCR IOCB.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the SCR ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued scr command
+ *   1 - Failed to issue scr command
+ **/
+int
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	struct lpfc_nodelist *ndlp;
+
+	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
+
+	ndlp = lpfc_findnode_did(vport, nportid);
+	if (!ndlp) {
+		ndlp = lpfc_nlp_init(vport, nportid);
+		if (!ndlp)
+			return 1;
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_SCR);
+
+	if (!elsiocb) {
+		/* This will trigger the release of the node just
+		 * allocated
+		 */
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
+	pcmd += sizeof(uint32_t);
+
+	/* For SCR, remainder of payload is SCR parameter page */
+	memset(pcmd, 0, sizeof(SCR));
+	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue SCR:       did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitSCR++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		/* The additional lpfc_nlp_put will cause the following
+		 * lpfc_els_free_iocb routine to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	/* This will cause the callback-function lpfc_cmpl_els_cmd to
+	 * trigger the release of the node.
+	 */
+
+	lpfc_nlp_put(ndlp);
+	return 0;
+}
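+
+/* Illustrative sketch (comment only): SCR is normally sent to the fabric
+ * controller well-known address so the switch will deliver RSCN events.
+ * A hypothetical call, assuming SCR_DID is the fabric controller DID
+ * defined in the driver headers:
+ *
+ *	if (lpfc_issue_els_scr(vport, SCR_DID, 0))
+ *		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ *				 "SCR issue failed\n");
+ */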
+
+/**
+ * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fibre Channel Address Resolution Response
+ * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
+ * is passed into the function. It first searches the @vport node list to find
+ * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
+ * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
+ * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FARPR ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued farpr command
+ *   1 - Failed to issue farpr command
+ **/
+static int
+lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	FARP *fp;
+	uint8_t *pcmd;
+	uint32_t *lp;
+	uint16_t cmdsize;
+	struct lpfc_nodelist *ondlp;
+	struct lpfc_nodelist *ndlp;
+
+	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
+
+	ndlp = lpfc_findnode_did(vport, nportid);
+	if (!ndlp) {
+		ndlp = lpfc_nlp_init(vport, nportid);
+		if (!ndlp)
+			return 1;
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_RNID);
+	if (!elsiocb) {
+		/* This will trigger the release of the node just
+		 * allocated
+		 */
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in FARPR payload */
+	fp = (FARP *) (pcmd);
+	memset(fp, 0, sizeof(FARP));
+	lp = (uint32_t *) pcmd;
+	*lp++ = be32_to_cpu(nportid);
+	*lp++ = be32_to_cpu(vport->fc_myDID);
+	fp->Rflags = 0;
+	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
+
+	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ondlp = lpfc_findnode_did(vport, nportid);
+	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
+		memcpy(&fp->OportName, &ondlp->nlp_portname,
+		       sizeof(struct lpfc_name));
+		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
+		       sizeof(struct lpfc_name));
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FARPR:     did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitFARPR++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		/* The additional lpfc_nlp_put will cause the following
+		 * lpfc_els_free_iocb routine to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	/* This will cause the callback-function lpfc_cmpl_els_cmd to
+	 * trigger the release of the node.
+	 */
+	lpfc_nlp_put(ndlp);
+	return 0;
+}
+
+/**
+ * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nlp: pointer to a node-list data structure.
+ *
+ * This routine cancels the timer with a delayed IOCB-command retry for
+ * a @vport's @ndlp. It stops the timer for the delayed function retry and
+ * removes the ELS retry event if present. In addition, if the
+ * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
+ * commands are sent for the @vport's nodes that require issuing discovery
+ * ADISC.
+ **/
+void
+lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_work_evt *evtp;
+
+	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+		return;
+	spin_lock_irq(shost->host_lock);
+	nlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	del_timer_sync(&nlp->nlp_delayfunc);
+	nlp->nlp_last_elscmd = 0;
+	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
+		list_del_init(&nlp->els_retry_evt.evt_listp);
+		/* Decrement nlp reference count held for the delayed retry */
+		evtp = &nlp->els_retry_evt;
+		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
+	}
+	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		if (vport->num_disc_nodes) {
+			if (vport->port_state < LPFC_VPORT_READY) {
+				/* Check if there are more ADISCs to be sent */
+				lpfc_more_adisc(vport);
+			} else {
+				/* Check if there are more PLOGIs to be sent */
+				lpfc_more_plogi(vport);
+				if (vport->num_disc_nodes == 0) {
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_NDISC_ACTIVE;
+					spin_unlock_irq(shost->host_lock);
+					lpfc_can_disctmo(vport);
+					lpfc_end_rscn(vport);
+				}
+			}
+		}
+	}
+	return;
+}
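+
+/* Usage note (illustrative): this routine is the safe way to tear down a
+ * pending delayed retry before reusing an ndlp, e.g. (hypothetical):
+ *
+ *	if (ndlp->nlp_flag & NLP_DELAY_TMO)
+ *		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ *
+ * The NLP_DELAY_TMO check is also performed internally, so the call is
+ * harmless when no delayed retry is armed.
+ */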
+
+/**
+ * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
+ * @ptr: holder for the pointer to the timer function associated data (ndlp).
+ *
+ * This routine is invoked by the ndlp delayed-function timer to check
+ * whether there is any pending ELS retry event(s) with the node. If not, it
+ * simply returns. Otherwise, if there is at least one ELS delayed event, it
+ * adds the delayed events to the HBA work list and invokes the
+ * lpfc_worker_wake_up() routine to wake up worker thread to process the
+ * event. Note that lpfc_nlp_get() is called before posting the event to
+ * the work list to hold reference count of ndlp so that it guarantees the
+ * reference to ndlp will still be available when the worker thread gets
+ * to the event associated with the ndlp.
+ **/
+void
+lpfc_els_retry_delay(unsigned long ptr)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (!list_empty(&evtp->evt_listp)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	/* We need to hold the node by incrementing the reference
+	 * count until the queued work is done
+	 */
+	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_ELS_RETRY;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return;
+}
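+
+/* Setup sketch (illustrative, assuming the v4.14 timer API): this handler
+ * is expected to be registered on the ndlp delayed-function timer when
+ * the node is initialized, along the lines of:
+ *
+ *	setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+ *		    (unsigned long)ndlp);
+ *
+ * so that the (unsigned long)ptr argument above decodes back to the ndlp.
+ */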
+
+/**
+ * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine is the worker-thread handler for processing the @ndlp delayed
+ * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
+ * the last ELS command from the associated ndlp and invokes the proper ELS
+ * function according to the delayed ELS command to retry the command.
+ **/
+void
+lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport *vport = ndlp->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	uint32_t cmd, retry;
+
+	spin_lock_irq(shost->host_lock);
+	cmd = ndlp->nlp_last_elscmd;
+	ndlp->nlp_last_elscmd = 0;
+
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+
+	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	/*
+	 * If a discovery event re-added nlp_delayfunc after the timer
+	 * fired and before the timer was processed, cancel the
+	 * nlp_delayfunc.
+	 */
+	del_timer_sync(&ndlp->nlp_delayfunc);
+	retry = ndlp->nlp_retry;
+	ndlp->nlp_retry = 0;
+
+	switch (cmd) {
+	case ELS_CMD_FLOGI:
+		lpfc_issue_els_flogi(vport, ndlp, retry);
+		break;
+	case ELS_CMD_PLOGI:
+		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+		}
+		break;
+	case ELS_CMD_ADISC:
+		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+		}
+		break;
+	case ELS_CMD_PRLI:
+	case ELS_CMD_NVMEPRLI:
+		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+		}
+		break;
+	case ELS_CMD_LOGO:
+		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+		}
+		break;
+	case ELS_CMD_FDISC:
+		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+			lpfc_issue_els_fdisc(vport, ndlp, retry);
+		break;
+	}
+	return;
+}
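+
+/* Arming sketch (illustrative): a delayed retry is queued by recording
+ * the command and retry count on the ndlp, setting NLP_DELAY_TMO under
+ * the host lock, and starting nlp_delayfunc; lpfc_els_retry() below does
+ * exactly this:
+ *
+ *	ndlp->nlp_retry = cmdiocb->retry;
+ *	mod_timer(&ndlp->nlp_delayfunc,
+ *		  jiffies + msecs_to_jiffies(delay));
+ *	ndlp->nlp_flag |= NLP_DELAY_TMO;
+ *	ndlp->nlp_last_elscmd = cmd;
+ */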
+
+/**
+ * lpfc_els_retry - Make retry decision on an els command iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine makes a retry decision on an ELS command IOCB, which has
+ * failed. The following ELS IOCBs use this function to retry a command
+ * when the previously issued command completed with an error status: FLOGI,
+ * PLOGI, PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * returned error status, it makes the decision whether a retry shall be
+ * issued for the command, and whether the retry shall be made immediately or
+ * delayed. In the former case, the corresponding ELS command issuing-function
+ * is called to retry the command. In the latter case, the ELS command is
+ * posted to the ndlp delayed event and the delayed function timer is set on
+ * the ndlp for the delayed command issuing.
+ *
+ * Return code
+ *   0 - No retry of els command is made
+ *   1 - Immediate or delayed retry of els command is made
+ **/
+static int
+lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+	       struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	uint32_t *elscmd;
+	struct ls_rjt stat;
+	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
+	int logerr = 0;
+	uint32_t cmd = 0;
+	uint32_t did;
+
+
+	/* Note: context2 may be 0 for internal driver abort
+	 * of a delayed ELS command.
+	 */
+
+	if (pcmd && pcmd->virt) {
+		elscmd = (uint32_t *) (pcmd->virt);
+		cmd = *elscmd++;
+	}
+
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+		did = ndlp->nlp_DID;
+	else {
+		/* We should only hit this case for retrying PLOGI */
+		did = irsp->un.elsreq64.remoteID;
+		ndlp = lpfc_findnode_did(vport, did);
+		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		    && (cmd != ELS_CMD_PLOGI))
+			return 1;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
+		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+
+	switch (irsp->ulpStatus) {
+	case IOSTAT_FCP_RSP_ERROR:
+		break;
+	case IOSTAT_REMOTE_STOP:
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			/* This IO was aborted by the target, we don't
+			 * know the rxid and because we did not send the
+			 * ABTS we cannot generate an RRQ.
+			 */
+			lpfc_set_rrq_active(phba, ndlp,
+					 cmdiocb->sli4_lxritag, 0, 0);
+		}
+		break;
+	case IOSTAT_LOCAL_REJECT:
+		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+		case IOERR_LOOP_OPEN_FAILURE:
+			if (cmd == ELS_CMD_FLOGI) {
+				if (PCI_DEVICE_ID_HORNET ==
+					phba->pcidev->device) {
+					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
+					phba->pport->fc_myDID = 0;
+					phba->alpa_map[0] = 0;
+					phba->alpa_map[1] = 0;
+				}
+			}
+			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
+				delay = 1000;
+			retry = 1;
+			break;
+
+		case IOERR_ILLEGAL_COMMAND:
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0124 Retry illegal cmd x%x "
+					 "retry:x%x delay:x%x\n",
+					 cmd, cmdiocb->retry, delay);
+			retry = 1;
+			/* All commands' retry policy */
+			maxretry = 8;
+			if (cmdiocb->retry > 2)
+				delay = 1000;
+			break;
+
+		case IOERR_NO_RESOURCES:
+			logerr = 1; /* HBA out of resources */
+			retry = 1;
+			if (cmdiocb->retry > 100)
+				delay = 100;
+			maxretry = 250;
+			break;
+
+		case IOERR_ILLEGAL_FRAME:
+			delay = 100;
+			retry = 1;
+			break;
+
+		case IOERR_SEQUENCE_TIMEOUT:
+		case IOERR_INVALID_RPI:
+			if (cmd == ELS_CMD_PLOGI &&
+			    did == NameServer_DID) {
+				/* Continue forever if PLOGI to the
+				 * nameserver fails
+				 */
+				maxretry = 0;
+				delay = 100;
+			}
+			retry = 1;
+			break;
+		}
+		break;
+
+	case IOSTAT_NPORT_RJT:
+	case IOSTAT_FABRIC_RJT:
+		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+			retry = 1;
+			break;
+		}
+		break;
+
+	case IOSTAT_NPORT_BSY:
+	case IOSTAT_FABRIC_BSY:
+		logerr = 1; /* Fabric / Remote NPort out of resources */
+		retry = 1;
+		break;
+
+	case IOSTAT_LS_RJT:
+		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+		/* Added for Vendor specific support
+		 * Just keep retrying for these Rsn / Exp codes
+		 */
+		switch (stat.un.b.lsRjtRsnCode) {
+		case LSRJT_UNABLE_TPC:
+			/* The driver has a VALID PLOGI but the rport has
+			 * rejected the PRLI - can't do it now.  Delay
+			 * for 1 second and try again - don't care about
+			 * the explanation.
+			 */
+			if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+				delay = 1000;
+				maxretry = lpfc_max_els_tries + 1;
+				retry = 1;
+				break;
+			}
+
+			/* Legacy bug fix code for targets with PLOGI delays. */
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_CMD_IN_PROGRESS) {
+				if (cmd == ELS_CMD_PLOGI) {
+					delay = 1000;
+					maxretry = 48;
+				}
+				retry = 1;
+				break;
+			}
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_CANT_GIVE_DATA) {
+				if (cmd == ELS_CMD_PLOGI) {
+					delay = 1000;
+					maxretry = 48;
+				}
+				retry = 1;
+				break;
+			}
+			if (cmd == ELS_CMD_PLOGI) {
+				delay = 1000;
+				maxretry = lpfc_max_els_tries + 1;
+				retry = 1;
+				break;
+			}
+			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+			  (cmd == ELS_CMD_FDISC) &&
+			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0125 FDISC Failed (x%x). "
+						 "Fabric out of resources\n",
+						 stat.un.lsRjtError);
+				lpfc_vport_set_state(vport,
+						     FC_VPORT_NO_FABRIC_RSCS);
+			}
+			break;
+
+		case LSRJT_LOGICAL_BSY:
+			if ((cmd == ELS_CMD_PLOGI) ||
+			    (cmd == ELS_CMD_PRLI) ||
+			    (cmd == ELS_CMD_NVMEPRLI)) {
+				delay = 1000;
+				maxretry = 48;
+			} else if (cmd == ELS_CMD_FDISC) {
+				/* FDISC retry policy */
+				maxretry = 48;
+				if (cmdiocb->retry >= 32)
+					delay = 1000;
+			}
+			retry = 1;
+			break;
+
+		case LSRJT_LOGICAL_ERR:
+			/* There are some cases where switches return this
+			 * error when they are not ready and should be returning
+			 * Logical Busy. We should delay every time.
+			 */
+			if (cmd == ELS_CMD_FDISC &&
+			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
+				maxretry = 3;
+				delay = 1000;
+				retry = 1;
+			} else if (cmd == ELS_CMD_FLOGI &&
+				   stat.un.b.lsRjtRsnCodeExp ==
+						LSEXP_NOTHING_MORE) {
+				vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+				retry = 1;
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0820 FLOGI Failed (x%x). "
+						 "BBCredit Not Supported\n",
+						 stat.un.lsRjtError);
+			}
+			break;
+
+		case LSRJT_PROTOCOL_ERR:
+			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+			  (cmd == ELS_CMD_FDISC) &&
+			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
+			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
+			  ) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0122 FDISC Failed (x%x). "
+						 "Fabric Detected Bad WWN\n",
+						 stat.un.lsRjtError);
+				lpfc_vport_set_state(vport,
+						     FC_VPORT_FABRIC_REJ_WWN);
+			}
+			break;
+		case LSRJT_VENDOR_UNIQUE:
+			if ((stat.un.b.vendorUnique == 0x45) &&
+			    (cmd == ELS_CMD_FLOGI)) {
+				goto out_retry;
+			}
+			break;
+		case LSRJT_CMD_UNSUPPORTED:
+			/* lpfc nvmet returns this type of LS_RJT when it
+			 * receives an FCP PRLI because lpfc nvmet only
+			 * supports NVME.  The ELS request is terminated for FCP4
+			 * on this rport.
+			 */
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
+				spin_unlock_irq(shost->host_lock);
+				retry = 0;
+				goto out_retry;
+			}
+			break;
+		}
+		break;
+
+	case IOSTAT_INTERMED_RSP:
+	case IOSTAT_BA_RJT:
+		break;
+
+	default:
+		break;
+	}
+
+	if (did == FDMI_DID)
+		retry = 1;
+
+	if ((cmd == ELS_CMD_FLOGI) &&
+	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
+	    !lpfc_error_lost_link(irsp)) {
+		/* FLOGI retry policy */
+		retry = 1;
+		/* retry FLOGI forever */
+		if (phba->link_flag != LS_LOOPBACK_MODE)
+			maxretry = 0;
+		else
+			maxretry = 2;
+
+		if (cmdiocb->retry >= 100)
+			delay = 5000;
+		else if (cmdiocb->retry >= 32)
+			delay = 1000;
+	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+		/* retry FDISCs every second up to devloss */
+		retry = 1;
+		maxretry = vport->cfg_devloss_tmo;
+		delay = 1000;
+	}
+
+	cmdiocb->retry++;
+	if (maxretry && (cmdiocb->retry >= maxretry)) {
+		phba->fc_stat.elsRetryExceeded++;
+		retry = 0;
+	}
+
+	if ((vport->load_flag & FC_UNLOADING) != 0)
+		retry = 0;
+
+out_retry:
+	if (retry) {
+		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
+			/* Stop retrying PLOGI and FDISC if in FCF discovery */
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+						 "2849 Stop retry ELS command "
+						 "x%x to remote NPORT x%x, "
+						 "Data: x%x x%x\n", cmd, did,
+						 cmdiocb->retry, delay);
+				return 0;
+			}
+		}
+
+		/* Retry ELS command <elsCmd> to remote NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "0107 Retry ELS command x%x to remote "
+				 "NPORT x%x Data: x%x x%x\n",
+				 cmd, did, cmdiocb->retry, delay);
+
+		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
+			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			IOERR_NO_RESOURCES))) {
+			/* Don't reset timer for no resources */
+
+			/* If discovery / RSCN timer is running, reset it */
+			if (timer_pending(&vport->fc_disctmo) ||
+			    (vport->fc_flag & FC_RSCN_MODE))
+				lpfc_set_disctmo(vport);
+		}
+
+		phba->fc_stat.elsXmitRetry++;
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
+			phba->fc_stat.elsDelayRetry++;
+			ndlp->nlp_retry = cmdiocb->retry;
+
+			/* delay is specified in milliseconds */
+			mod_timer(&ndlp->nlp_delayfunc,
+				jiffies + msecs_to_jiffies(delay));
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			if ((cmd == ELS_CMD_PRLI) ||
+			    (cmd == ELS_CMD_NVMEPRLI))
+				lpfc_nlp_set_state(vport, ndlp,
+					NLP_STE_PRLI_ISSUE);
+			else
+				lpfc_nlp_set_state(vport, ndlp,
+					NLP_STE_NPR_NODE);
+			ndlp->nlp_last_elscmd = cmd;
+
+			return 1;
+		}
+		switch (cmd) {
+		case ELS_CMD_FLOGI:
+			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_FDISC:
+			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_PLOGI:
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+				ndlp->nlp_prev_state = ndlp->nlp_state;
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_PLOGI_ISSUE);
+			}
+			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_ADISC:
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_PRLI:
+		case ELS_CMD_NVMEPRLI:
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_LOGO:
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
+			return 1;
+		}
+	}
+	/* No retry ELS command <elsCmd> to remote NPORT <did> */
+	if (logerr) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "0137 No retry ELS command x%x to remote "
+			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
+			 cmd, did, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0108 No retry ELS command x%x to remote "
+			 "NPORT x%x Retried:%d Error:x%x/%x\n",
+			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
+	}
+	return 0;
+}
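+
+/* Quick reference (informational only, derived from the cases above):
+ *
+ *	condition                      maxretry  delay (ms)
+ *	IOERR_NO_RESOURCES             250       100 after 100 tries
+ *	IOERR_ILLEGAL_FRAME            default   100
+ *	NameServer PLOGI seq timeout   forever   100
+ *	LS_RJT logical busy            48        1000
+ *	FLOGI (non-loop topology)      forever   1000/5000 by retry count
+ *	FDISC                          devloss   1000
+ */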
+
+/**
+ * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
+ *
+ * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
+ * associated with a command IOCB back to the lpfc DMA buffer pool. It first
+ * checks to see whether there is a lpfc DMA buffer associated with the
+ * response of the command IOCB. If so, it will be released before releasing
+ * the lpfc DMA buffer associated with the IOCB itself.
+ *
+ * Return code
+ *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
+static int
+lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
+{
+	struct lpfc_dmabuf *buf_ptr;
+
+	/* Free the response before processing the command. */
+	if (!list_empty(&buf_ptr1->list)) {
+		list_remove_head(&buf_ptr1->list, buf_ptr,
+				 struct lpfc_dmabuf,
+				 list);
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+	}
+	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+	kfree(buf_ptr1);
+	return 0;
+}
+
+/**
+ * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr: pointer to the lpfc dma buffer data structure.
+ *
+ * This routine releases the lpfc Direct Memory Access (DMA) buffer
+ * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
+ * pool.
+ *
+ * Return code
+ *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
+static int
+lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
+{
+	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+	kfree(buf_ptr);
+	return 0;
+}
+
+/**
+ * lpfc_els_free_iocb - Free a command iocb and its associated resources
+ * @phba: pointer to lpfc hba data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine frees a command IOCB and its associated resources. The
+ * command IOCB data structure contains the reference to various associated
+ * resources; these fields must be set to NULL if the associated reference
+ * is not present:
+ *   context1 - reference to ndlp
+ *   context2 - reference to cmd
+ *   context2->next - reference to rsp
+ *   context3 - reference to bpl
+ *
+ * It first properly decrements the reference count held on ndlp for the
+ * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
+ * set, it invokes the lpfc_els_free_data() routine to release the Direct
+ * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
+ * adds the DMA buffer to the @phba data structure for delayed release.
+ * If reference to the Buffer Pointer List (BPL) is present, the
+ * lpfc_els_free_bpl() routine is invoked to release the DMA memory
+ * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
+ * invoked to release the IOCB data structure back to @phba IOCBQ list.
+ *
+ * Return code
+ *   0 - Success (currently, always return 0)
+ **/
+int
+lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
+{
+	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
+	if (ndlp) {
+		if (ndlp->nlp_flag & NLP_DEFER_RM) {
+			lpfc_nlp_put(ndlp);
+
+			/* If the ndlp is not being used by another discovery
+			 * thread, free it.
+			 */
+			if (!lpfc_nlp_not_used(ndlp)) {
+				/* If ndlp is being used by another discovery
+				 * thread, just clear NLP_DEFER_RM
+				 */
+				ndlp->nlp_flag &= ~NLP_DEFER_RM;
+			}
+		} else
+			lpfc_nlp_put(ndlp);
+		elsiocb->context1 = NULL;
+	}
+	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
+	if (elsiocb->context2) {
+		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+			/* Firmware could still be in progress of DMAing
+			 * payload, so don't free data buffer till after
+			 * a hbeat.
+			 */
+			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
+			buf_ptr = elsiocb->context2;
+			elsiocb->context2 = NULL;
+			if (buf_ptr) {
+				buf_ptr1 = NULL;
+				spin_lock_irq(&phba->hbalock);
+				if (!list_empty(&buf_ptr->list)) {
+					list_remove_head(&buf_ptr->list,
+						buf_ptr1, struct lpfc_dmabuf,
+						list);
+					INIT_LIST_HEAD(&buf_ptr1->list);
+					list_add_tail(&buf_ptr1->list,
+						&phba->elsbuf);
+					phba->elsbuf_cnt++;
+				}
+				INIT_LIST_HEAD(&buf_ptr->list);
+				list_add_tail(&buf_ptr->list, &phba->elsbuf);
+				phba->elsbuf_cnt++;
+				spin_unlock_irq(&phba->hbalock);
+			}
+		} else {
+			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+			lpfc_els_free_data(phba, buf_ptr1);
+			elsiocb->context2 = NULL;
+		}
+	}
+
+	if (elsiocb->context3) {
+		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+		lpfc_els_free_bpl(phba, buf_ptr);
+		elsiocb->context3 = NULL;
+	}
+	lpfc_sli_release_iocbq(phba, elsiocb);
+	return 0;
+}
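+
+/* Usage note (illustrative): every ELS completion handler in this file
+ * hands the command IOCB back here when it is done, typically:
+ *
+ *	static void some_els_cmpl(struct lpfc_hba *phba,
+ *				  struct lpfc_iocbq *cmdiocb,
+ *				  struct lpfc_iocbq *rspiocb)
+ *	{
+ *		... command-specific handling ...
+ *		lpfc_els_free_iocb(phba, cmdiocb);
+ *	}
+ *
+ * (some_els_cmpl is a hypothetical name; see lpfc_cmpl_els_cmd above.)
+ */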
+
+/**
+ * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the Logout (LOGO)
+ * Accept (ACC) Response ELS command. This routine is invoked to indicate
+ * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
+ * release the ndlp if it has the last reference remaining (reference count
+ * is 1). If it succeeds (meaning the ndlp was released), it sets the IOCB
+ * context1 field to NULL to inform the following lpfc_els_free_iocb() routine
+ * that no ndlp reference count needs to be decremented. Otherwise, the ndlp
+ * reference use-count shall be decremented by the lpfc_els_free_iocb()
+ * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
+ * IOCB data structure.
+ **/
+static void
+lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		       struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+	/* ACC to LOGO completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0109 ACC to LOGO completes to NPort x%x "
+			 "Data: x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+
+	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+		/* NPort Recovery mode or node is just allocated */
+		if (!lpfc_nlp_not_used(ndlp)) {
+			/* If the ndlp is being used by another discovery
+			 * thread, just unregister the RPI.
+			 */
+			lpfc_unreg_rpi(vport, ndlp);
+		} else {
+			/* Indicate the node has already been released; do
+			 * not reference it from within lpfc_els_free_iocb.
+			 */
+			cmdiocb->context1 = NULL;
+		}
+	}
+
+	/*
+	 * The driver received a LOGO from the rport and has ACK'd it.
+	 * At this point, the driver is done, so release the IOCB.
+	 */
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_mbx_cmpl_dflt_rpi - Completion callback for unreg dflt rpi mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for unregister default
+ * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
+ * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
+ * decrements the ndlp reference count held for this completion callback
+ * function. After that, it invokes the lpfc_nlp_not_used() to check
+ * whether there is only one reference left on the ndlp. If so, it will
+ * perform one more decrement and trigger the release of the ndlp.
+ **/
+void
+lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	if (ndlp) {
+		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+				 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+				 kref_read(&ndlp->kref),
+				 ndlp->nlp_usg_map, ndlp);
+		if (NLP_CHK_NODE_ACT(ndlp)) {
+			lpfc_nlp_put(ndlp);
+			/* This is the end of the default RPI cleanup logic for
+			 * this ndlp. If no other discovery threads are using
+			 * this ndlp, free all resources associated with it.
+			 */
+			lpfc_nlp_not_used(ndlp);
+		} else {
+			lpfc_drop_node(ndlp->vport, ndlp);
+		}
+	}
+
+	return;
+}
+
+/**
+ * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for ELS Response IOCB
+ * command. In normal case, this callback function just properly sets the
+ * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
+ * field in the command IOCB is not NULL, the referred mailbox command will
+ * be sent out, and then invokes the lpfc_els_free_iocb() routine to release
+ * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
+ * link down event occurred during the discovery, the lpfc_nlp_not_used()
+ * routine shall be invoked trying to release the ndlp if no other threads
+ * are currently referring to it.
+ **/
+static void
+lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
+	IOCB_t  *irsp;
+	uint8_t *pcmd;
+	LPFC_MBOXQ_t *mbox = NULL;
+	struct lpfc_dmabuf *mp = NULL;
+	uint32_t ls_rjt = 0;
+
+	irsp = &rspiocb->iocb;
+
+	if (cmdiocb->context_un.mbox)
+		mbox = cmdiocb->context_un.mbox;
+
+	/* First determine if this is a LS_RJT cmpl. Note, this callback
+	 * function can have cmdiocb->context1 (ndlp) field set to NULL.
+	 */
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+		/* A LS_RJT associated with Default RPI cleanup has its own
+		 * separate code path.
+		 */
+		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+			ls_rjt = 1;
+	}
+
+	/* Check to see if link went down during discovery */
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
+		if (mbox) {
+			mp = (struct lpfc_dmabuf *) mbox->context1;
+			if (mp) {
+				lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			mempool_free(mbox, phba->mbox_mem_pool);
+		}
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+			if (lpfc_nlp_not_used(ndlp)) {
+				ndlp = NULL;
+				/* Indicate the node has already been
+				 * released; do not reference it from
+				 * within the routine lpfc_els_free_iocb.
+				 */
+				cmdiocb->context1 = NULL;
+			}
+		goto out;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		cmdiocb->iocb.un.elsreq64.remoteID);
+	/* ELS response tag <ulpIoTag> completes */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0110 ELS response tag x%x completes "
+			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
+			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	if (mbox) {
+		if ((rspiocb->iocb.ulpStatus == 0)
+		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+			if (!lpfc_unreg_rpi(vport, ndlp) &&
+			    (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
+			     ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0314 PLOGI recov DID x%x "
+					"Data: x%x x%x x%x\n",
+					ndlp->nlp_DID, ndlp->nlp_state,
+					ndlp->nlp_rpi, ndlp->nlp_flag);
+				mp = mbox->context1;
+				if (mp) {
+					lpfc_mbuf_free(phba, mp->virt,
+						       mp->phys);
+					kfree(mp);
+				}
+				mempool_free(mbox, phba->mbox_mem_pool);
+				goto out;
+			}
+
+			/* Increment reference count to ndlp to hold the
+			 * reference to ndlp for the callback function.
+			 */
+			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->vport = vport;
+			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
+				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+			} else {
+				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+				ndlp->nlp_prev_state = ndlp->nlp_state;
+				lpfc_nlp_set_state(vport, ndlp,
+					   NLP_STE_REG_LOGIN_ISSUE);
+			}
+
+			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+			    != MBX_NOT_FINISHED)
+				goto out;
+
+			/* Decrement the ndlp reference count we
+			 * set for this failed mailbox command.
+			 */
+			lpfc_nlp_put(ndlp);
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+
+			/* ELS rsp: Cannot issue reg_login for <NPortid> */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0138 ELS rsp: Cannot issue reg_login for x%x "
+				"Data: x%x x%x x%x\n",
+				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+				ndlp->nlp_rpi);
+
+			if (lpfc_nlp_not_used(ndlp)) {
+				ndlp = NULL;
+				/* Indicate the node has already been
+				 * released; do not reference it from within
+				 * the routine lpfc_els_free_iocb.
+				 */
+				cmdiocb->context1 = NULL;
+			}
+		} else {
+			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
+			if (!lpfc_error_lost_link(irsp) &&
+			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+				if (lpfc_nlp_not_used(ndlp)) {
+					ndlp = NULL;
+					/* Indicate the node has already
+					 * been released; do not reference
+					 * it from within the routine
+					 * lpfc_els_free_iocb.
+					 */
+					cmdiocb->context1 = NULL;
+				}
+			}
+		}
+		mp = (struct lpfc_dmabuf *) mbox->context1;
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+out:
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
+		spin_lock_irq(shost->host_lock);
+		if (mbox)
+			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
+		spin_unlock_irq(shost->host_lock);
+
+		/* If the node is not being used by another discovery thread,
+		 * and we are sending a reject, we are done with it.
+		 * Release driver reference count here and free associated
+		 * resources.
+		 */
+		if (ls_rjt)
+			if (lpfc_nlp_not_used(ndlp))
+				/* Indicate the node has already been
+				 * released; do not reference it from within
+				 * the routine lpfc_els_free_iocb.
+				 */
+				cmdiocb->context1 = NULL;
+
+	}
+
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @flag: the els command code to be accepted.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues an Accept (ACC) response IOCB
+ * command. It uses the @flag to properly set up the IOCB field for the
+ * specific ACC response command to be issued and invokes the
+ * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
+ * @mbox pointer is passed in, it will be put into the context_un.mbox
+ * field of the IOCB for the completion callback function to issue the
+ * mailbox command to the HBA later when callback is invoked.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the corresponding response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued acc response
+ *   1 - Failed to issue acc response
+ **/
+int
+lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+		 LPFC_MBOXQ_t *mbox)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	IOCB_t *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	struct serv_parm *sp;
+	uint16_t cmdsize;
+	int rc;
+	ELS_PKT *els_pkt_ptr;
+
+	oldcmd = &oldiocb->iocb;
+
+	switch (flag) {
+	case ELS_CMD_ACC:
+		cmdsize = sizeof(uint32_t);
+		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+		if (!elsiocb) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+			spin_unlock_irq(shost->host_lock);
+			return 1;
+		}
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+		pcmd += sizeof(uint32_t);
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			"Issue ACC:       did:x%x flg:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+		break;
+	case ELS_CMD_FLOGI:
+	case ELS_CMD_PLOGI:
+		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
+		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+		if (!elsiocb)
+			return 1;
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+		if (mbox)
+			elsiocb->context_un.mbox = mbox;
+
+		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+		pcmd += sizeof(uint32_t);
+		sp = (struct serv_parm *)pcmd;
+
+		if (flag == ELS_CMD_FLOGI) {
+			/* Copy the received service parameters back */
+			memcpy(sp, &phba->fc_fabparam,
+			       sizeof(struct serv_parm));
+
+			/* Clear the F_Port bit */
+			sp->cmn.fPort = 0;
+
+			/* Mark all class service parameters as invalid */
+			sp->cls1.classValid = 0;
+			sp->cls2.classValid = 0;
+			sp->cls3.classValid = 0;
+			sp->cls4.classValid = 0;
+
+			/* Copy our worldwide names */
+			memcpy(&sp->portName, &vport->fc_sparam.portName,
+			       sizeof(struct lpfc_name));
+			memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
+			       sizeof(struct lpfc_name));
+		} else {
+			memcpy(pcmd, &vport->fc_sparam,
+			       sizeof(struct serv_parm));
+
+			sp->cmn.valid_vendor_ver_level = 0;
+			memset(sp->un.vendorVersion, 0,
+			       sizeof(sp->un.vendorVersion));
+			sp->cmn.bbRcvSizeMsb &= 0xF;
+
+			/* If our firmware supports this feature, convey that
+			 * info to the target using the vendor specific field.
+			 */
+			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
+				sp->cmn.valid_vendor_ver_level = 1;
+				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
+				sp->un.vv.flags =
+					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
+			}
+		}
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+		break;
+	case ELS_CMD_PRLO:
+		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
+		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
+		if (!elsiocb)
+			return 1;
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+		       sizeof(uint32_t) + sizeof(PRLO));
+		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
+		els_pkt_ptr = (ELS_PKT *) pcmd;
+		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			"Issue ACC PRLO:  did:x%x flg:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+		break;
+	default:
+		return 1;
+	}
+	/* Xmit ELS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
+			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
+			 "fc_flag x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi, vport->fc_flag);
+	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+		spin_lock_irq(shost->host_lock);
+		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+			ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
+			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+		spin_unlock_irq(shost->host_lock);
+		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+	} else {
+		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	}
+
+	phba->fc_stat.elsXmitACC++;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
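+
+/* Caller sketch (illustrative, hypothetical): the discovery state machine
+ * typically ACCs an unsolicited PLOGI with a REG_LOGIN mailbox attached so
+ * the RPI is registered once the ACC completes, along these lines:
+ *
+ *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	if (mbox && !lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
+ *				  (uint8_t *)sp, mbox, ndlp->nlp_rpi))
+ *		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+ */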
+
+/**
+ * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @rejectError: the LS_RJT reason and explanation codes for the response.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues a Reject (RJT) response IOCB
+ * command. If a @mbox pointer is passed in, it will be put into the
+ * context_un.mbox field of the IOCB for the completion callback function
+ * to issue to the HBA later.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the reject response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued reject response
+ *   1 - Failed to issue reject response
+ **/
+int
+lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+		    LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	IOCB_t *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	cmdsize = 2 * sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+	pcmd += sizeof(uint32_t);
+	*((uint32_t *) (pcmd)) = rejectError;
+
+	if (mbox)
+		elsiocb->context_un.mbox = mbox;
+
+	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0129 Xmit ELS RJT x%x response tag x%x "
+			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+			 "rpi x%x\n",
+			 rejectError, elsiocb->iotag,
+			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
+
+	phba->fc_stat.elsXmitLSRJT++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
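+
+/* Caller sketch (illustrative): rejects are usually composed through a
+ * struct ls_rjt so the reason and explanation codes pack into the single
+ * rejectError word, e.g. (hypothetical, mirroring lpfc_nportdisc.c):
+ *
+ *	struct ls_rjt stat;
+ *
+ *	memset(&stat, 0, sizeof(struct ls_rjt));
+ *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ *	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ */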
+
+/**
+ * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to an Address
+ * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC Accept response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued acc adisc response
+ *   1 - Failed to issue adisc acc response
+ **/
+int
+lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+		       struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	ADISC *ap;
+	IOCB_t *icmd, *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	/* Xmit ADISC ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0130 Xmit ADISC ACC response iotag x%x xri: "
+			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+
+	ap = (ADISC *) (pcmd);
+	ap->hardAL_PA = phba->fc_pref_ALPA;
+	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ap->DID = be32_to_cpu(vport->fc_myDID);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC ADISC: did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to a Process
+ * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI Accept response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued acc prli response
+ *   1 - Failed to issue acc prli response
+ **/
+int
+lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+		      struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	PRLI *npr;
+	struct lpfc_nvme_prli *npr_nvme;
+	lpfc_vpd_t *vpd;
+	IOCB_t *icmd;
+	IOCB_t *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	uint32_t prli_fc4_req, *req_payload;
+	struct lpfc_dmabuf *req_buf;
+	int rc;
+	u32 elsrspcmd;
+
+	/* Need the incoming PRLI payload to determine if the ACC is for an
+	 * FC4 or NVME PRLI type.  The PRLI type is at word 1.
+	 */
+	req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
+	req_payload = (((uint32_t *)req_buf->virt) + 1);
+
+	/* PRLI type payload is at byte 3 for FCP or NVME. */
+	prli_fc4_req = be32_to_cpu(*req_payload);
+	prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "6127 PRLI_ACC:  Req Type x%x, Word1 x%08x\n",
+			 prli_fc4_req, *((uint32_t *)req_payload));
+
+	if (prli_fc4_req == PRLI_FCP_TYPE) {
+		cmdsize = sizeof(uint32_t) + sizeof(PRLI);
+		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
+		cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
+		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
+	} else {
+		return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+		ndlp->nlp_DID, elsrspcmd);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	/* Xmit PRLI ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	memset(pcmd, 0, cmdsize);
+
+	*((uint32_t *)(pcmd)) = elsrspcmd;
+	pcmd += sizeof(uint32_t);
+
+	/* For PRLI, remainder of payload is PRLI parameter page */
+	vpd = &phba->vpd;
+
+	if (prli_fc4_req == PRLI_FCP_TYPE) {
+		/*
+		 * If the remote port is a target and our firmware version
+		 * is 3.20 or later, set the following bits for FC-TAPE
+		 * support.
+		 */
+		npr = (PRLI *) pcmd;
+		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+		    (vpd->rev.feaLevelHigh >= 0x02)) {
+			npr->ConfmComplAllowed = 1;
+			npr->Retry = 1;
+			npr->TaskRetryIdReq = 1;
+		}
+		npr->acceptRspCode = PRLI_REQ_EXECUTED;
+		npr->estabImagePair = 1;
+		npr->readXferRdyDis = 1;
+		npr->ConfmComplAllowed = 1;
+		npr->prliType = PRLI_FCP_TYPE;
+		npr->initiatorFunc = 1;
+	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
+		/* Respond with an NVME PRLI Type */
+		npr_nvme = (struct lpfc_nvme_prli *) pcmd;
+		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
+		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
+		bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
+		if (phba->nvmet_support) {
+			bf_set(prli_tgt, npr_nvme, 1);
+			bf_set(prli_disc, npr_nvme, 1);
+			if (phba->cfg_nvme_enable_fb) {
+				bf_set(prli_fba, npr_nvme, 1);
+
+				/* TBD.  Target mode needs to post buffers
+				 * that support the configured first burst
+				 * byte size.
+				 */
+				bf_set(prli_fb_sz, npr_nvme,
+				       phba->cfg_nvmet_fb_size);
+			}
+		} else {
+			bf_set(prli_init, npr_nvme, 1);
+		}
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+				 "6015 NVME issue PRLI ACC word1 x%08x "
+				 "word4 x%08x word5 x%08x flag x%x, "
+				 "fcp_info x%x nlp_type x%x\n",
+				 npr_nvme->word1, npr_nvme->word4,
+				 npr_nvme->word5, ndlp->nlp_flag,
+				 ndlp->nlp_fcp_info, ndlp->nlp_type);
+		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
+		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
+		npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
+	} else
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
+				 prli_fc4_req, ndlp->nlp_fc4_type,
+				 ndlp->nlp_DID);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC PRLI:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
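+/*
+ * Illustrative sketch of the PRLI type decode used above: word 1 of the
+ * received payload carries the FC4 TYPE in its most significant byte, so
+ * after the big-endian conversion the type is isolated with a shift and
+ * mask, then compared against PRLI_FCP_TYPE or tested for PRLI_NVME_TYPE.
+ *
+ *	uint32_t word1 = be32_to_cpu(((uint32_t *)req_buf->virt)[1]);
+ *	uint8_t prli_fc4_req = (word1 >> 24) & 0xff;
+ */
+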
+/**
+ * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @format: rnid command format.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a Request Node Identification Data (RNID) Accept
+ * (ACC) response. It constructs the RNID ACC response command according to
+ * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
+ * issue the response. Note that this command does not need to hold the ndlp
+ * reference count for the callback. So, the ndlp reference count taken by
+ * the lpfc_prep_els_iocb() routine is put back and the context1 field of
+ * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
+ * there is no ndlp reference available.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function. However, for the RNID Accept Response ELS command,
+ * this is undone later by this routine after the IOCB is allocated.
+ *
+ * Return code
+ *   0 - Successfully issued acc rnid response
+ *   1 - Failed to issue acc rnid response
+ **/
+static int
+lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
+		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	RNID *rn;
+	IOCB_t *icmd, *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+					+ (2 * sizeof(struct lpfc_name));
+	if (format)
+		cmdsize += sizeof(RNID_TOP_DISC);
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	/* Xmit RNID ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+
+	memset(pcmd, 0, sizeof(RNID));
+	rn = (RNID *) (pcmd);
+	rn->Format = format;
+	rn->CommonLen = (2 * sizeof(struct lpfc_name));
+	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	switch (format) {
+	case 0:
+		rn->SpecificLen = 0;
+		break;
+	case RNID_TOPOLOGY_DISC:
+		rn->SpecificLen = sizeof(RNID_TOP_DISC);
+		memcpy(&rn->un.topologyDisc.portName,
+		       &vport->fc_portname, sizeof(struct lpfc_name));
+		rn->un.topologyDisc.unitType = RNID_HBA;
+		rn->un.topologyDisc.physPort = 0;
+		rn->un.topologyDisc.attachedNodes = 0;
+		break;
+	default:
+		rn->CommonLen = 0;
+		rn->SpecificLen = 0;
+		break;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC RNID:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
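+/*
+ * Sizing note for the RNID accept above, as a worked sum: the response
+ * always carries the ELS command word, the format/length word, and the
+ * two WWN names; any non-zero format also reserves room for the topology
+ * page, though only RNID_TOPOLOGY_DISC actually fills it in.
+ *
+ *	cmdsize = sizeof(uint32_t)		(ELS_CMD_ACC word)
+ *		+ sizeof(uint32_t)		(Format/CommonLen/SpecificLen)
+ *		+ 2 * sizeof(struct lpfc_name);	(port and node names)
+ *	if (format)
+ *		cmdsize += sizeof(RNID_TOP_DISC);
+ */
+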
+/**
+ * lpfc_els_clear_rrq - Clear the exchange that this rrq describes
+ * @vport: pointer to a virtual N_Port data structure.
+ * @iocb: pointer to the lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine decodes the RRQ payload received in @iocb, looks up the
+ * active RRQ for the exchange it names, and clears it if one is found.
+ **/
+static void
+lpfc_els_clear_rrq(struct lpfc_vport *vport,
+		   struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	uint8_t *pcmd;
+	struct RRQ *rrq;
+	uint16_t rxid;
+	uint16_t xri;
+	struct lpfc_node_rrq *prrq;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+	pcmd += sizeof(uint32_t);
+	rrq = (struct RRQ *)pcmd;
+	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+	rxid = bf_get(rrq_rxid, rrq);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
+			" x%x x%x\n",
+			be32_to_cpu(bf_get(rrq_did, rrq)),
+			bf_get(rrq_oxid, rrq),
+			rxid,
+			iocb->iotag, iocb->iocb.ulpContext);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
+		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
+	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+		xri = bf_get(rrq_oxid, rrq);
+	else
+		xri = rxid;
+	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
+	if (prrq)
+		lpfc_clr_rrq_active(phba, xri, prrq);
+	return;
+}
+
+/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully issued acc echo response
+ *   1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+	/* The accumulated length can exceed the BPL_SIZE.  For
+	 * now, use this as the limit.
+	 */
+	if (cmdsize > LPFC_BPL_SIZE)
+		cmdsize = LPFC_BPL_SIZE;
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
+	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+
+	/* Xmit ECHO ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC ECHO:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Address Discover (ADISC) ELS commands to those
+ * N_Ports of the @vport that are in node port recovery state and for which
+ * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by
+ * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
+ * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
+ * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
+ * set in the @vport fc_flag and the remaining ADISC IOCBs are left to be
+ * issued on a later pass. On the other hand, if no ADISC IOCB is issued
+ * after walking through all the ndlps of the @vport, the FC_NLP_MORE bit
+ * is cleared in the @vport fc_flag to indicate that no more ADISCs need
+ * to be sent.
+ *
+ * Return code
+ *    The number of N_Ports with adisc issued.
+ **/
+int
+lpfc_els_disc_adisc(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int sentadisc = 0;
+
+	/* go thru NPR nodes and issue any remaining ELS ADISCs */
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, 0);
+			sentadisc++;
+			vport->num_disc_nodes++;
+			if (vport->num_disc_nodes >=
+			    vport->cfg_discovery_threads) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_NLP_MORE;
+				spin_unlock_irq(shost->host_lock);
+				break;
+			}
+		}
+	}
+	if (sentadisc == 0) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NLP_MORE;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return sentadisc;
+}
+
+/**
+ * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
+ * of the @vport that are in node port recovery state. Each time an ELS
+ * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
+ * the per-@vport discovery count (num_disc_nodes) is incremented. If
+ * num_disc_nodes reaches the pre-configured threshold
+ * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
+ * fc_flag and the remaining PLOGI IOCBs are left to be issued on a later
+ * pass. On the other hand, if no PLOGI IOCB is issued after walking
+ * through all the ndlps of the @vport, the FC_NLP_MORE bit is cleared in
+ * the @vport fc_flag to indicate that no more PLOGIs need to be sent.
+ *
+ * Return code
+ *   The number of N_Ports with plogi issued.
+ **/
+int
+lpfc_els_disc_plogi(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int sentplogi = 0;
+
+	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+				(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+				(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
+				(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+			sentplogi++;
+			vport->num_disc_nodes++;
+			if (vport->num_disc_nodes >=
+					vport->cfg_discovery_threads) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_NLP_MORE;
+				spin_unlock_irq(shost->host_lock);
+				break;
+			}
+		}
+	}
+	if (sentplogi) {
+		lpfc_set_disctmo(vport);
+	} else {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NLP_MORE;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return sentplogi;
+}
+
+static uint32_t
+lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
+		uint32_t word0)
+{
+	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
+	desc->payload.els_req = word0;
+	desc->length = cpu_to_be32(sizeof(desc->payload));
+
+	return sizeof(struct fc_rdp_link_service_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
+		uint8_t *page_a0, uint8_t *page_a2)
+{
+	uint16_t wavelength;
+	uint16_t temperature;
+	uint16_t rx_power;
+	uint16_t tx_bias;
+	uint16_t tx_power;
+	uint16_t vcc;
+	uint16_t flag = 0;
+	struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
+	struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
+
+	desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
+
+	trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
+			&page_a0[SSF_TRANSCEIVER_CODE_B4];
+	trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
+			&page_a0[SSF_TRANSCEIVER_CODE_B5];
+
+	if ((trasn_code_byte4->fc_sw_laser) ||
+	    (trasn_code_byte5->fc_sw_laser_sl) ||
+	    (trasn_code_byte5->fc_sw_laser_sn)) {  /* check if it is short WL */
+		flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
+	} else if (trasn_code_byte4->fc_lw_laser) {
+		wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
+			page_a0[SSF_WAVELENGTH_B0];
+		if (wavelength == SFP_WAVELENGTH_LC1310)
+			flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
+		if (wavelength == SFP_WAVELENGTH_LL1550)
+			flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
+	}
+	/* check if it is SFP+ */
+	flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
+			SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
+					<< SFP_FLAG_CT_SHIFT;
+
+	/* check if it is OPTICAL */
+	flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
+			SFP_FLAG_IS_OPTICAL_PORT : 0)
+					<< SFP_FLAG_IS_OPTICAL_SHIFT;
+
+	temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
+		page_a2[SFF_TEMPERATURE_B0]);
+	vcc = (page_a2[SFF_VCC_B1] << 8 |
+		page_a2[SFF_VCC_B0]);
+	tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
+		page_a2[SFF_TXPOWER_B0]);
+	tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
+		page_a2[SFF_TX_BIAS_CURRENT_B0]);
+	rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
+		page_a2[SFF_RXPOWER_B0]);
+	desc->sfp_info.temperature = cpu_to_be16(temperature);
+	desc->sfp_info.rx_power = cpu_to_be16(rx_power);
+	desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
+	desc->sfp_info.tx_power = cpu_to_be16(tx_power);
+	desc->sfp_info.vcc = cpu_to_be16(vcc);
+
+	desc->sfp_info.flags = cpu_to_be16(flag);
+	desc->length = cpu_to_be32(sizeof(desc->sfp_info));
+
+	return sizeof(struct fc_rdp_sfp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
+		READ_LNK_VAR *stat)
+{
+	uint32_t type;
+
+	desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
+
+	type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
+
+	desc->info.port_type = cpu_to_be32(type);
+
+	desc->info.link_status.link_failure_cnt =
+		cpu_to_be32(stat->linkFailureCnt);
+	desc->info.link_status.loss_of_synch_cnt =
+		cpu_to_be32(stat->lossSyncCnt);
+	desc->info.link_status.loss_of_signal_cnt =
+		cpu_to_be32(stat->lossSignalCnt);
+	desc->info.link_status.primitive_seq_proto_err =
+		cpu_to_be32(stat->primSeqErrCnt);
+	desc->info.link_status.invalid_trans_word =
+		cpu_to_be32(stat->invalidXmitWord);
+	desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
+
+	desc->length = cpu_to_be32(sizeof(desc->info));
+
+	return sizeof(struct fc_rdp_link_error_status_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
+		      struct lpfc_vport *vport)
+{
+	uint32_t bbCredit;
+
+	desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
+
+	bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
+			(vport->fc_sparam.cmn.bbCreditMsb << 8);
+	desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
+	if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+		bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
+			(vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
+		desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
+	} else {
+		desc->bbc_info.attached_port_bbc = 0;
+	}
+
+	desc->bbc_info.rtt = 0;
+	desc->length = cpu_to_be32(sizeof(desc->bbc_info));
+
+	return sizeof(struct fc_rdp_bbc_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
+			   struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
+{
+	uint32_t flags = 0;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
+	desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
+	desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
+	desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
+
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
+		flags |= RDP_OET_HIGH_ALARM;
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
+		flags |= RDP_OET_LOW_ALARM;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
+		flags |= RDP_OET_HIGH_WARNING;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
+		flags |= RDP_OET_LOW_WARNING;
+
+	flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+	return sizeof(struct fc_rdp_oed_sfp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
+			      struct fc_rdp_oed_sfp_desc *desc,
+			      uint8_t *page_a2)
+{
+	uint32_t flags = 0;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
+	desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
+	desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
+	desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
+
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
+		flags |= RDP_OET_HIGH_ALARM;
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
+		flags |= RDP_OET_LOW_ALARM;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
+		flags |= RDP_OET_HIGH_WARNING;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
+		flags |= RDP_OET_LOW_WARNING;
+
+	flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+	return sizeof(struct fc_rdp_oed_sfp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
+			     struct fc_rdp_oed_sfp_desc *desc,
+			     uint8_t *page_a2)
+{
+	uint32_t flags = 0;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
+	desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
+	desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
+	desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
+
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
+		flags |= RDP_OET_HIGH_ALARM;
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
+		flags |= RDP_OET_LOW_ALARM;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
+		flags |= RDP_OET_HIGH_WARNING;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
+		flags |= RDP_OET_LOW_WARNING;
+
+	flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+	return sizeof(struct fc_rdp_oed_sfp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
+			      struct fc_rdp_oed_sfp_desc *desc,
+			      uint8_t *page_a2)
+{
+	uint32_t flags = 0;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
+	desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
+	desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
+	desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
+
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
+		flags |= RDP_OET_HIGH_ALARM;
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
+		flags |= RDP_OET_LOW_ALARM;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
+		flags |= RDP_OET_HIGH_WARNING;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
+		flags |= RDP_OET_LOW_WARNING;
+
+	flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+	return sizeof(struct fc_rdp_oed_sfp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
+			      struct fc_rdp_oed_sfp_desc *desc,
+			      uint8_t *page_a2)
+{
+	uint32_t flags = 0;
+
+	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+	desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
+	desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
+	desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
+	desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
+
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
+		flags |= RDP_OET_HIGH_ALARM;
+	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
+		flags |= RDP_OET_LOW_ALARM;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
+		flags |= RDP_OET_HIGH_WARNING;
+	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
+		flags |= RDP_OET_LOW_WARNING;
+
+	flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
+	desc->oed_info.function_flags = cpu_to_be32(flags);
+	desc->length = cpu_to_be32(sizeof(desc->oed_info));
+	return sizeof(struct fc_rdp_oed_sfp_desc);
+}
+
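+/*
+ * The five lpfc_rdp_res_oed_*_desc() helpers above share one shape: copy
+ * the four threshold bytes for the quantity out of SFP page A2, fold the
+ * cached sfp_alarm/sfp_warning transgression bits into RDP_OET_* flags,
+ * and stamp the four-bit RDP_OED_* type code into place with
+ * RDP_OED_TYPE_SHIFT. A hedged sketch of the flag encoding for one
+ * quantity; the helpers differ only in the bits and type code used:
+ *
+ *	uint32_t flags = 0;
+ *
+ *	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
+ *		flags |= RDP_OET_HIGH_ALARM;
+ *	flags |= (0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT;
+ *	desc->oed_info.function_flags = cpu_to_be32(flags);
+ */
+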
+static uint32_t
+lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
+		      uint8_t *page_a0, struct lpfc_vport *vport)
+{
+	desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
+	memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
+	memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
+	memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
+	memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
+	memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
+	desc->length = cpu_to_be32(sizeof(desc->opd_info));
+	return sizeof(struct fc_rdp_opd_sfp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
+{
+	if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
+		return 0;
+	desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
+
+	desc->info.CorrectedBlocks =
+		cpu_to_be32(stat->fecCorrBlkCount);
+	desc->info.UncorrectableBlocks =
+		cpu_to_be32(stat->fecUncorrBlkCount);
+
+	desc->length = cpu_to_be32(sizeof(desc->info));
+
+	return sizeof(struct fc_fec_rdp_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
+{
+	uint16_t rdp_cap = 0;
+	uint16_t rdp_speed;
+
+	desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
+
+	switch (phba->fc_linkspeed) {
+	case LPFC_LINK_SPEED_1GHZ:
+		rdp_speed = RDP_PS_1GB;
+		break;
+	case LPFC_LINK_SPEED_2GHZ:
+		rdp_speed = RDP_PS_2GB;
+		break;
+	case LPFC_LINK_SPEED_4GHZ:
+		rdp_speed = RDP_PS_4GB;
+		break;
+	case LPFC_LINK_SPEED_8GHZ:
+		rdp_speed = RDP_PS_8GB;
+		break;
+	case LPFC_LINK_SPEED_10GHZ:
+		rdp_speed = RDP_PS_10GB;
+		break;
+	case LPFC_LINK_SPEED_16GHZ:
+		rdp_speed = RDP_PS_16GB;
+		break;
+	case LPFC_LINK_SPEED_32GHZ:
+		rdp_speed = RDP_PS_32GB;
+		break;
+	default:
+		rdp_speed = RDP_PS_UNKNOWN;
+		break;
+	}
+
+	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
+
+	if (phba->lmt & LMT_32Gb)
+		rdp_cap |= RDP_PS_32GB;
+	if (phba->lmt & LMT_16Gb)
+		rdp_cap |= RDP_PS_16GB;
+	if (phba->lmt & LMT_10Gb)
+		rdp_cap |= RDP_PS_10GB;
+	if (phba->lmt & LMT_8Gb)
+		rdp_cap |= RDP_PS_8GB;
+	if (phba->lmt & LMT_4Gb)
+		rdp_cap |= RDP_PS_4GB;
+	if (phba->lmt & LMT_2Gb)
+		rdp_cap |= RDP_PS_2GB;
+	if (phba->lmt & LMT_1Gb)
+		rdp_cap |= RDP_PS_1GB;
+
+	if (rdp_cap == 0)
+		rdp_cap = RDP_CAP_UNKNOWN;
+	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
+		rdp_cap |= RDP_CAP_USER_CONFIGURED;
+
+	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
+	desc->length = cpu_to_be32(sizeof(desc->info));
+	return sizeof(struct fc_rdp_port_speed_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
+		struct lpfc_vport *vport)
+{
+	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
+
+	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
+			sizeof(desc->port_names.wwnn));
+
+	memcpy(desc->port_names.wwpn, &vport->fc_portname,
+			sizeof(desc->port_names.wwpn));
+
+	desc->length = cpu_to_be32(sizeof(desc->port_names));
+	return sizeof(struct fc_rdp_port_name_desc);
+}
+
+static uint32_t
+lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
+		struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
+	if (vport->fc_flag & FC_FABRIC) {
+		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
+				sizeof(desc->port_names.wwnn));
+
+		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
+				sizeof(desc->port_names.wwpn));
+	} else {  /* Point to Point */
+		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
+				sizeof(desc->port_names.wwnn));
+
+		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+				sizeof(desc->port_names.wwpn));
+	}
+
+	desc->length = cpu_to_be32(sizeof(desc->port_names));
+	return sizeof(struct fc_rdp_port_name_desc);
+}
+
+static void
+lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
+		int status)
+{
+	struct lpfc_nodelist *ndlp = rdp_context->ndlp;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_iocbq *elsiocb;
+	struct ulp_bde64 *bpl;
+	IOCB_t *icmd;
+	uint8_t *pcmd;
+	struct ls_rjt *stat;
+	struct fc_rdp_res_frame *rdp_res;
+	uint32_t cmdsize, len;
+	uint16_t *flag_ptr;
+	int rc;
+
+	if (status != SUCCESS)
+		goto error;
+
+	/* This will change once we know the true size of the RDP payload */
+	cmdsize = sizeof(struct fc_rdp_res_frame);
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
+			lpfc_max_els_tries, rdp_context->ndlp,
+			rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
+	lpfc_nlp_put(ndlp);
+	if (!elsiocb)
+		goto free_rdp_context;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = rdp_context->rx_id;
+	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			"2171 Xmit RDP response tag x%x xri x%x, "
+			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
+			elsiocb->iotag, elsiocb->iocb.ulpContext,
+			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			ndlp->nlp_rpi);
+	rdp_res = (struct fc_rdp_res_frame *)
+		(((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+
+	/* Update Alarm and Warning */
+	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
+	phba->sfp_alarm |= *flag_ptr;
+	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
+	phba->sfp_warning |= *flag_ptr;
+
+	/* For RDP payload */
+	len = 8;
+	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
+					 (len + pcmd), ELS_CMD_RDP);
+
+	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
+			rdp_context->page_a0, rdp_context->page_a2);
+	len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
+				  phba);
+	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
+				       (len + pcmd), &rdp_context->link_stat);
+	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
+					     (len + pcmd), vport);
+	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
+					(len + pcmd), vport, ndlp);
+	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
+			&rdp_context->link_stat);
+	/* Check if the nport is logged in, BZ190632 */
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+		goto lpfc_skip_descriptor;
+
+	len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
+				     &rdp_context->link_stat, vport);
+	len += lpfc_rdp_res_oed_temp_desc(phba,
+				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+				rdp_context->page_a2);
+	len += lpfc_rdp_res_oed_voltage_desc(phba,
+				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+				rdp_context->page_a2);
+	len += lpfc_rdp_res_oed_txbias_desc(phba,
+				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+				rdp_context->page_a2);
+	len += lpfc_rdp_res_oed_txpower_desc(phba,
+				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+				rdp_context->page_a2);
+	len += lpfc_rdp_res_oed_rxpower_desc(phba,
+				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+				rdp_context->page_a2);
+	len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
+				     rdp_context->page_a0, vport);
+
+lpfc_skip_descriptor:
+	rdp_res->length = cpu_to_be32(len - 8);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	/* Now that we know the true size of the payload, update the BPL */
+	bpl = (struct ulp_bde64 *)
+		(((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
+	bpl->tus.f.bdeSize = len;
+	bpl->tus.f.bdeFlags = 0;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	phba->fc_stat.elsXmitACC++;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+
+	kfree(rdp_context);
+
+	return;
+error:
+	cmdsize = 2 * sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
+			ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
+	lpfc_nlp_put(ndlp);
+	if (!elsiocb)
+		goto free_rdp_context;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = rdp_context->rx_id;
+	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
+	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+
+	phba->fc_stat.elsXmitLSRJT++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (rc == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+free_rdp_context:
+	kfree(rdp_context);
+}
+
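+/*
+ * A minimal sketch of the descriptor-append idiom in lpfc_els_rdp_cmpl()
+ * above: each lpfc_rdp_res_*() builder writes one descriptor at
+ * (len + pcmd) and returns that descriptor's size, so the running length
+ * both places the next descriptor and, less the 8 bytes of ELS command
+ * word and frame length word, becomes the value written back into
+ * rdp_res->length and the BPL size.
+ *
+ *	len = 8;
+ *	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
+ *					 (len + pcmd), ELS_CMD_RDP);
+ *	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
+ *			rdp_context->page_a0, rdp_context->page_a2);
+ *	rdp_res->length = cpu_to_be32(len - 8);
+ *	bpl->tus.f.bdeSize = len;
+ */
+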
+static int
+lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
+{
+	LPFC_MBOXQ_t *mbox = NULL;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
+				"7105 failed to allocate mailbox memory");
+		return 1;
+	}
+
+	if (lpfc_sli4_dump_page_a0(phba, mbox))
+		goto prep_mbox_fail;
+	mbox->vport = rdp_context->ndlp->vport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
+	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		goto issue_mbox_fail;
+
+	return 0;
+
+prep_mbox_fail:
+issue_mbox_fail:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return 1;
+}
+
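+/*
+ * Hedged sketch of the non-blocking mailbox idiom used above: allocate
+ * from the mailbox mempool, attach the completion handler and its
+ * context, and submit with MBX_NOWAIT. On success, ownership of the
+ * mailbox passes to the completion handler; the caller frees it only
+ * when preparation or submission fails.
+ *
+ *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
+ *	mbox->context2 = rdp_context;
+ *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
+ *		mempool_free(mbox, phba->mbox_mem_pool);
+ */
+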
+/**
+ * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
+ * IOCB. First, the payload of the unsolicited RDP is checked. Then the
+ * routine will (1) send MBX_DUMP_MEMORY, embedded DMP_LMSD sub command
+ * TYPE-3 for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
+ * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
+ * lpfc_els_rdp_cmpl to gather all the data and send the RDP response.
+ *
+ * Return code
+ *   0 - Sent the acc response
+ *   1 - Sent the reject response
+ **/
+static int
+lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
+	struct fc_rdp_req_frame *rdp_req;
+	struct lpfc_rdp_context *rdp_context;
+	IOCB_t *cmd = NULL;
+	struct ls_rjt stat;
+
+	if (phba->sli_rev < LPFC_SLI_REV4 ||
+	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
+						LPFC_SLI_INTF_IF_TYPE_2) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_expl = LSEXP_REQ_UNSUPPORTED;
+		goto error;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_expl = LSEXP_REQ_UNSUPPORTED;
+		goto error;
+	}
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2422 ELS RDP Request "
+			 "dec len %d tag x%x port_id %d len %d\n",
+			 be32_to_cpu(rdp_req->rdp_des_length),
+			 be32_to_cpu(rdp_req->nport_id_desc.tag),
+			 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
+			 be32_to_cpu(rdp_req->nport_id_desc.length));
+
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+	    !phba->cfg_enable_SmartSAN) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_expl = LSEXP_PORT_LOGIN_REQ;
+		goto error;
+	}
+	if (sizeof(struct fc_rdp_nport_desc) !=
+			be32_to_cpu(rdp_req->rdp_des_length))
+		goto rjt_logerr;
+	if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
+		goto rjt_logerr;
+	if (RDP_NPORT_ID_SIZE !=
+			be32_to_cpu(rdp_req->nport_id_desc.length))
+		goto rjt_logerr;
+	rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
+	if (!rdp_context) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		goto error;
+	}
+
+	cmd = &cmdiocb->iocb;
+	rdp_context->ndlp = lpfc_nlp_get(ndlp);
+	rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
+	rdp_context->rx_id = cmd->ulpContext;
+	rdp_context->cmpl = lpfc_els_rdp_cmpl;
+	if (lpfc_get_rdp_info(phba, rdp_context)) {
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
+				 "2423 Unable to send mailbox");
+		kfree(rdp_context);
+		rjt_err = LSRJT_UNABLE_TPC;
+		lpfc_nlp_put(ndlp);
+		goto error;
+	}
+
+	return 0;
+
+rjt_logerr:
+	rjt_err = LSRJT_LOGICAL_ERR;
+
+error:
+	memset(&stat, 0, sizeof(stat));
+	stat.un.b.lsRjtRsnCode = rjt_err;
+	stat.un.b.lsRjtRsnCodeExp = rjt_expl;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 1;
+}
+
+static void
+lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	struct ls_rjt *stat;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_lcb_context *lcb_context;
+	struct fc_lcb_res_frame *lcb_res;
+	uint32_t cmdsize, shdr_status, shdr_add_status;
+	int rc;
+
+	mb = &pmb->u.mb;
+	lcb_context = (struct lpfc_lcb_context *)pmb->context1;
+	ndlp = lcb_context->ndlp;
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	shdr = (union lpfc_sli4_cfg_shdr *)
+			&pmb->u.mqe.un.beacon_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
+				"0194 SET_BEACON_CONFIG mailbox "
+				"completed with status x%x add_status x%x,"
+				" mbx status x%x\n",
+				shdr_status, shdr_add_status, mb->mbxStatus);
+
+	if (mb->mbxStatus && !(shdr_status &&
+		shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		goto error;
+	}
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+	cmdsize = sizeof(struct fc_lcb_res_frame);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+			lpfc_max_els_tries, ndlp,
+			ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb)
+		goto free_lcb_context;
+
+	lcb_res = (struct fc_lcb_res_frame *)
+		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = lcb_context->rx_id;
+	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+
+	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+	*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
+	lcb_res->lcb_sub_command = lcb_context->sub_command;
+	lcb_res->lcb_type = lcb_context->type;
+	lcb_res->lcb_frequency = lcb_context->frequency;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+
+	kfree(lcb_context);
+	return;
+
+error:
+	cmdsize = sizeof(struct fc_lcb_res_frame);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+			lpfc_max_els_tries, ndlp,
+			ndlp->nlp_DID, ELS_CMD_LS_RJT);
+	lpfc_nlp_put(ndlp);
+	if (!elsiocb)
+		goto free_lcb_context;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = lcb_context->rx_id;
+	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+	*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
+	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
+	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+
+	if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+		stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitLSRJT++;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+free_lcb_context:
+	kfree(lcb_context);
+}
+
+static int
+lpfc_sli4_set_beacon(struct lpfc_vport *vport,
+		     struct lpfc_lcb_context *lcb_context,
+		     uint32_t beacon_state)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox = NULL;
+	uint32_t len;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return 1;
+
+	len = sizeof(struct lpfc_mbx_set_beacon_config) -
+		sizeof(struct lpfc_sli4_cfg_mhdr);
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
+			 LPFC_SLI4_MBX_EMBED);
+	mbox->context1 = (void *)lcb_context;
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_els_lcb_rsp;
+	bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
+	       phba->sli4_hba.physical_port);
+	bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
+	       beacon_state);
+	bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
+	bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_lcb - Process an unsolicited LCB
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
+ * First, the payload of the unsolicited LCB is checked. Then, based on
+ * the subcommand, the beacon is either turned on or off.
+ *
+ * Return code
+ *   0 - Sent the acc response
+ *   1 - Sent the reject response
+ **/
+static int
+lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint8_t *lp;
+	struct fc_lcb_request_frame *beacon;
+	struct lpfc_lcb_context *lcb_context;
+	uint8_t state, rjt_err;
+	struct ls_rjt stat;
+
+	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+	lp = (uint8_t *)pcmd->virt;
+	beacon = (struct fc_lcb_request_frame *)pcmd->virt;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			"0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
+			"type x%x frequency %x duration x%x\n",
+			lp[0], lp[1], lp[2],
+			beacon->lcb_command,
+			beacon->lcb_sub_command,
+			beacon->lcb_type,
+			beacon->lcb_frequency,
+			be16_to_cpu(beacon->lcb_duration));
+
+	if (phba->sli_rev < LPFC_SLI_REV4 ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)) {
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
+		goto rjt;
+	}
+
+	if (phba->hba_flag & HBA_FCOE_MODE) {
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
+		goto rjt;
+	}
+	if (beacon->lcb_sub_command != LPFC_LCB_ON &&
+	    beacon->lcb_sub_command != LPFC_LCB_OFF) {
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
+		goto rjt;
+	}
+	if (beacon->lcb_sub_command == LPFC_LCB_ON &&
+	    be16_to_cpu(beacon->lcb_duration) != 0) {
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
+		goto rjt;
+	}
+
+	lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
+	if (!lcb_context) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		goto rjt;
+	}
+
+	state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
+	lcb_context->sub_command = beacon->lcb_sub_command;
+	lcb_context->type = beacon->lcb_type;
+	lcb_context->frequency = beacon->lcb_frequency;
+	lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+	lcb_context->rx_id = cmdiocb->iocb.ulpContext;
+	lcb_context->ndlp = lpfc_nlp_get(ndlp);
+	if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
+		lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_ELS,
+				 "0193 failed to send mailbox\n");
+		kfree(lcb_context);
+		lpfc_nlp_put(ndlp);
+		rjt_err = LSRJT_UNABLE_TPC;
+		goto rjt;
+	}
+	return 0;
+rjt:
+	memset(&stat, 0, sizeof(stat));
+	stat.un.b.lsRjtRsnCode = rjt_err;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 1;
+}
+
+/**
+ * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine cleans up any Registration State Change Notification
+ * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
+ * @vport together with the host_lock is used to prevent multiple threads
+ * from trying to access the RSCN array of the same @vport at the same time.
+ **/
+void
+lpfc_els_flush_rscn(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	int i;
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_rscn_flush) {
+		/* Another thread is walking fc_rscn_id_list on this vport */
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
+	vport->fc_rscn_flush = 1;
+	spin_unlock_irq(shost->host_lock);
+
+	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
+		vport->fc_rscn_id_list[i] = NULL;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_rscn_id_cnt = 0;
+	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_can_disctmo(vport);
+	/* Indicate we are done walking this fc_rscn_id_list */
+	vport->fc_rscn_flush = 0;
+}
+
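+/*
+ * The fc_rscn_flush flag is a hand-rolled token guarding
+ * fc_rscn_id_list: it is tested and taken under host_lock, the list walk
+ * then runs without the lock, and the token is dropped once the walk
+ * finishes. A hedged sketch of the acquire side used throughout:
+ *
+ *	spin_lock_irq(shost->host_lock);
+ *	if (vport->fc_rscn_flush) {
+ *		spin_unlock_irq(shost->host_lock);
+ *		return;
+ *	}
+ *	vport->fc_rscn_flush = 1;
+ *	spin_unlock_irq(shost->host_lock);
+ */
+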
+/**
+ * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: remote destination port identifier.
+ *
+ * This routine checks whether there is any pending Registration State
+ * Change Notification (RSCN) to a @did on @vport.
+ *
+ * Return code
+ *   Non-zero - The @did matched a pending rscn
+ *   0 - not able to match @did with a pending rscn
+ **/
+int
+lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
+{
+	D_ID ns_did;
+	D_ID rscn_did;
+	uint32_t *lp;
+	uint32_t payload_len, i;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ns_did.un.word = did;
+
+	/* Never match fabric nodes for RSCNs */
+	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+		return 0;
+
+	/* If we are doing a FULL RSCN rediscovery, match everything */
+	if (vport->fc_flag & FC_RSCN_DISCOVERY)
+		return did;
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_rscn_flush) {
+		/* Another thread is walking fc_rscn_id_list on this vport */
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+	/* Indicate we are walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 1;
+	spin_unlock_irq(shost->host_lock);
+	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+		lp = vport->fc_rscn_id_list[i]->virt;
+		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+		payload_len -= sizeof(uint32_t);	/* take off word 0 */
+		while (payload_len) {
+			rscn_did.un.word = be32_to_cpu(*lp++);
+			payload_len -= sizeof(uint32_t);
+			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
+			case RSCN_ADDRESS_FORMAT_PORT:
+				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+				    && (ns_did.un.b.area == rscn_did.un.b.area)
+				    && (ns_did.un.b.id == rscn_did.un.b.id))
+					goto return_did_out;
+				break;
+			case RSCN_ADDRESS_FORMAT_AREA:
+				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+				    && (ns_did.un.b.area == rscn_did.un.b.area))
+					goto return_did_out;
+				break;
+			case RSCN_ADDRESS_FORMAT_DOMAIN:
+				if (ns_did.un.b.domain == rscn_did.un.b.domain)
+					goto return_did_out;
+				break;
+			case RSCN_ADDRESS_FORMAT_FABRIC:
+				goto return_did_out;
+			}
+		}
+	}
+	/* Indicate we are done with walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 0;
+	return 0;
+return_did_out:
+	/* Indicate we are done with walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 0;
+	return did;
+}
+
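+/*
+ * Worked example of the matching above (addresses illustrative): with
+ * ns_did = 0x0A0B0C (domain 0x0A, area 0x0B, id 0x0C), an RSCN entry in
+ * RSCN_ADDRESS_FORMAT_AREA form for domain 0x0A, area 0x0B matches
+ * regardless of the id byte; a PORT-format entry must also match the id
+ * byte 0x0C; a DOMAIN-format entry compares only the domain byte; and a
+ * FABRIC-format entry matches unconditionally.
+ */
+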
+/**
+ * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
+ * state machine for those nodes of a @vport that have a pending RSCN
+ * (Registration State Change Notification).
+ *
+ * Return code
+ *   0 - Successful (currently always returns 0)
+ **/
+static int
+lpfc_rscn_recovery_check(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+
+	/* Move all affected nodes by pending RSCNs to NPR state. */
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp) ||
+		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
+			continue;
+
+		/* NVME Target mode does not do RSCN Recovery. */
+		if (vport->phba->nvmet_support)
+			continue;
+
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_send_rscn_event - Send an RSCN event to management application
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ *
+ * lpfc_send_rscn_event sends an RSCN netlink event to management
+ * applications.
+ **/
+static void
+lpfc_send_rscn_event(struct lpfc_vport *vport,
+		struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_dmabuf *pcmd;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	uint32_t *payload_ptr;
+	uint32_t payload_len;
+	struct lpfc_rscn_event_header *rscn_event_data;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	payload_ptr = (uint32_t *) pcmd->virt;
+	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
+
+	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
+		payload_len, GFP_KERNEL);
+	if (!rscn_event_data) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0147 Failed to allocate memory for RSCN event\n");
+		return;
+	}
+	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
+	rscn_event_data->payload_length = payload_len;
+	memcpy(rscn_event_data->rscn_payload, payload_ptr,
+		payload_len);
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(struct lpfc_rscn_event_header) + payload_len,
+		(char *)rscn_event_data,
+		LPFC_NL_VENDOR_ID);
+
+	kfree(rscn_event_data);
+}
+
+/**
+ * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RSCN (Registration State Change
+ * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
+ * and the fc_host_post_event() routine is invoked to pass each affected
+ * N_Port ID to the FC transport layer. If the discovery state machine is
+ * about to begin discovery, the RSCN is simply accepted and the discovery
+ * process will satisfy it. If this RSCN only contains N_Port IDs for other
+ * vports on this HBA, it is accepted and otherwise ignored. If the state
+ * machine is in the recovery state, the fc_rscn_id_list of this @vport is
+ * walked and the lpfc_rscn_recovery_check() routine is invoked to send a
+ * recovery event to all nodes that match the RSCN payload. Otherwise, the
+ * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
+ *
+ * Return code
+ *   0 - Just sent the acc response
+ *   1 - Sent the acc response and waited for name server completion
+ **/
+static int
+lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp, *datap;
+	uint32_t payload_len, length, nportid, *cmd;
+	int rscn_cnt;
+	int rscn_id = 0, hba_id = 0;
+	int i;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+	payload_len -= sizeof(uint32_t);	/* take off word 0 */
+	/* RSCN received */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
+			 vport->fc_flag, payload_len, *lp,
+			 vport->fc_rscn_id_cnt);
+
+	/* Send an RSCN event to the management application */
+	lpfc_send_rscn_event(vport, cmdiocb);
+
+	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
+		fc_host_post_event(shost, fc_get_event_number(),
+			FCH_EVT_RSCN, lp[i]);
+
+	/* If we are about to begin discovery, just ACC the RSCN.
+	 * Discovery processing will satisfy it.
+	 */
+	if (vport->port_state <= LPFC_NS_QRY) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
+			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+		return 0;
+	}
+
+	/* If this RSCN just contains NPortIDs for other vports on this HBA,
+	 * just ACC and ignore it.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+		!(vport->cfg_peer_port_login)) {
+		i = payload_len;
+		datap = lp;
+		while (i > 0) {
+			nportid = *datap++;
+			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
+			i -= sizeof(uint32_t);
+			rscn_id++;
+			if (lpfc_find_vport_by_did(phba, nportid))
+				hba_id++;
+		}
+		if (rscn_id == hba_id) {
+			/* ALL NPortIDs in RSCN are on HBA */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0219 Ignore RSCN "
+					 "Data: x%x x%x x%x x%x\n",
+					 vport->fc_flag, payload_len,
+					 *lp, vport->fc_rscn_id_cnt);
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
+				ndlp->nlp_DID, vport->port_state,
+				ndlp->nlp_flag);
+
+			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
+				ndlp, NULL);
+			return 0;
+		}
+	}
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_rscn_flush) {
+		/* Another thread is walking fc_rscn_id_list on this vport */
+		vport->fc_flag |= FC_RSCN_DISCOVERY;
+		spin_unlock_irq(shost->host_lock);
+		/* Send back ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+		return 0;
+	}
+	/* Indicate we are walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 1;
+	spin_unlock_irq(shost->host_lock);
+	/* Get the array count after successfully have the token */
+	rscn_cnt = vport->fc_rscn_id_cnt;
+	/* If we are already processing an RSCN, save the received
+	 * RSCN payload buffer, cmdiocb->context2 to process later.
+	 */
+	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
+			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_RSCN_DEFERRED;
+		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
+		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
+			vport->fc_flag |= FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
+			if (rscn_cnt) {
+				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
+				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
+			}
+			if ((rscn_cnt) &&
+			    (payload_len + length <= LPFC_BPL_SIZE)) {
+				*cmd &= ELS_CMD_MASK;
+				*cmd |= cpu_to_be32(payload_len + length);
+				memcpy(((uint8_t *)cmd) + length, lp,
+				       payload_len);
+			} else {
+				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
+				vport->fc_rscn_id_cnt++;
+				/* If we zero cmdiocb->context2, the calling
+				 * routine will not try to free it.
+				 */
+				cmdiocb->context2 = NULL;
+			}
+			/* Deferred RSCN */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0235 Deferred RSCN "
+					 "Data: x%x x%x x%x\n",
+					 vport->fc_rscn_id_cnt, vport->fc_flag,
+					 vport->port_state);
+		} else {
+			vport->fc_flag |= FC_RSCN_DISCOVERY;
+			spin_unlock_irq(shost->host_lock);
+			/* ReDiscovery RSCN */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0234 ReDiscovery RSCN "
+					 "Data: x%x x%x x%x\n",
+					 vport->fc_rscn_id_cnt, vport->fc_flag,
+					 vport->port_state);
+		}
+		/* Indicate we are done walking fc_rscn_id_list on this vport */
+		vport->fc_rscn_flush = 0;
+		/* Send back ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+		/* send RECOVERY event for ALL nodes that match RSCN payload */
+		lpfc_rscn_recovery_check(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_RSCN_DEFERRED;
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
+		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_RSCN_MODE;
+	spin_unlock_irq(shost->host_lock);
+	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
+	/* Indicate we are done walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 0;
+	/*
+	 * If we zero cmdiocb->context2, the calling routine will
+	 * not try to free it.
+	 */
+	cmdiocb->context2 = NULL;
+	lpfc_set_disctmo(vport);
+	/* Send back ACC */
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	/* send RECOVERY event for ALL nodes that match RSCN payload */
+	lpfc_rscn_recovery_check(vport);
+	return lpfc_els_handle_rscn(vport);
+}
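+
+/*
+ * Coalescing sketch for the deferred path above, assuming an earlier
+ * RSCN payload is already parked at the tail of fc_rscn_id_list and the
+ * combined payloads fit in one LPFC_BPL_SIZE buffer:
+ *
+ *	cmd = vport->fc_rscn_id_list[rscn_cnt - 1]->virt;
+ *	length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);  old payload length
+ *	*cmd &= ELS_CMD_MASK;                        keep the command
+ *	*cmd |= cpu_to_be32(payload_len + length);   grow word-0 length
+ *	memcpy(((uint8_t *)cmd) + length, lp, payload_len);
+ *
+ * If the merged payload would not fit, the new buffer is parked as its
+ * own fc_rscn_id_list entry; once FC_MAX_HOLD_RSCN entries pile up, the
+ * driver stops tracking individual IDs and falls back to a full
+ * rediscovery via FC_RSCN_DISCOVERY.
+ */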
+
+/**
+ * lpfc_els_handle_rscn - Handle rscn for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine handles the Registration State Change Notification (RSCN)
+ * for a @vport. If a login to the NameServer does not exist, a new ndlp
+ * shall be created and a Port Login (PLOGI) to the NameServer is issued.
+ * Otherwise, if the ndlp to the NameServer exists, a Common Transport (CT)
+ * command to the NameServer shall be issued. If the CT command to the
+ * NameServer fails to be issued, the lpfc_els_flush_rscn() routine shall
+ * be invoked to clean up any RSCN activities with the @vport.
+ *
+ * Return code
+ *   0 - Cleaned up rscn on the @vport
+ *   1 - Wait for plogi to name server before proceeding
+ **/
+int
+lpfc_els_handle_rscn(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	/* Ignore RSCN if the port is being torn down. */
+	if (vport->load_flag & FC_UNLOADING) {
+		lpfc_els_flush_rscn(vport);
+		return 0;
+	}
+
+	/* Start timer for RSCN processing */
+	lpfc_set_disctmo(vport);
+
+	/* RSCN processed */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
+			 vport->port_state);
+
+	/* To process RSCN, first compare RSCN data with NameServer */
+	vport->fc_ns_retry = 0;
+	vport->num_disc_nodes = 0;
+
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+		/* Good ndlp, issue CT Request to NameServer.  Need to
+		 * know how many gidfts were issued.  If none, then just
+		 * flush the RSCN.  Otherwise, the outstanding requests
+		 * need to complete.
+		 */
+		vport->gidft_inp = 0;
+		if (lpfc_issue_gidft(vport) > 0)
+			return 1;
+	} else {
+		/* Nameserver login in question.  Revalidate. */
+		if (ndlp) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_PLOGI_ISSUE);
+			if (!ndlp) {
+				lpfc_els_flush_rscn(vport);
+				return 0;
+			}
+			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+		} else {
+			ndlp = lpfc_nlp_init(vport, NameServer_DID);
+			if (!ndlp) {
+				lpfc_els_flush_rscn(vport);
+				return 0;
+			}
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+		}
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
+		/* Wait for NameServer login cmpl before we can
+		 * continue
+		 */
+		return 1;
+	}
+
+	lpfc_els_flush_rscn(vport);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
+ * unsolicited event. An unsolicited FLOGI can be received in a point-to-
+ * point topology. As an unsolicited FLOGI should not be received in a loop
+ * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
+ * lpfc_check_sparm() routine is invoked to check the parameters in the
+ * unsolicited FLOGI. If parameter validation fails, the
+ * lpfc_els_rsp_reject() routine shall be called with the reject reason
+ * code set to LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the
+ * Port WWN in the FLOGI shall be compared with the Port WWN of the
+ * @vport to determine who will initiate PLOGI. The party with the
+ * lexicographically higher value shall have higher priority (as the
+ * winning port) and will initiate PLOGI and communicate Port_IDs
+ * (Addresses) for both nodes in the PLOGI. The result
+ * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
+ * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
+ *
+ * Return code
+ *   0 - Successfully processed the unsolicited flogi
+ *   1 - Failed to process the unsolicited flogi
+ **/
+static int
+lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	uint32_t *lp = (uint32_t *) pcmd->virt;
+	IOCB_t *icmd = &cmdiocb->iocb;
+	struct serv_parm *sp;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t cmd, did;
+	int rc;
+	uint32_t fc_flag = 0;
+	uint32_t port_state = 0;
+
+	cmd = *lp++;
+	sp = (struct serv_parm *) lp;
+
+	/* FLOGI received */
+
+	lpfc_set_disctmo(vport);
+
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		/* We should never receive a FLOGI in loop mode, ignore it */
+		did = icmd->un.elsreq64.remoteID;
+
+		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
+		   Loop Mode */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0113 An FLOGI ELS command x%x was "
+				 "received from DID x%x in Loop Mode\n",
+				 cmd, did);
+		return 1;
+	}
+
+	(void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
+
+	/*
+	 * If our portname is greater than the remote portname,
+	 * then we initiate Nport login.
+	 */
+
+	rc = memcmp(&vport->fc_portname, &sp->portName,
+		    sizeof(struct lpfc_name));
+
+	if (!rc) {
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			mbox = mempool_alloc(phba->mbox_mem_pool,
+					     GFP_KERNEL);
+			if (!mbox)
+				return 1;
+			lpfc_linkdown(phba);
+			lpfc_init_link(phba, mbox,
+				       phba->cfg_topology,
+				       phba->cfg_link_speed);
+			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, mbox,
+						 MBX_NOWAIT);
+			lpfc_set_loopback_flag(phba);
+			if (rc == MBX_NOT_FINISHED)
+				mempool_free(mbox, phba->mbox_mem_pool);
+			return 1;
+		}
+
+		/* abort the flogi coming back to ourselves
+		 * due to external loopback on the port.
+		 */
+		lpfc_els_abort_flogi(phba);
+		return 0;
+
+	} else if (rc > 0) {	/* greater than */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PT2PT_PLOGI;
+		spin_unlock_irq(shost->host_lock);
+
+		/* If we have the high WWPN we can assign our own
+		 * myDID; otherwise, we have to WAIT for a PLOGI
+		 * from the remote NPort to find out what it
+		 * will be.
+		 */
+		vport->fc_myDID = PT2PT_LocalID;
+	} else {
+		vport->fc_myDID = PT2PT_RemoteID;
+	}
+
+	/*
+	 * The vport state should go to LPFC_FLOGI only
+	 * AFTER we issue a FLOGI, not receive one.
+	 */
+	spin_lock_irq(shost->host_lock);
+	fc_flag = vport->fc_flag;
+	port_state = vport->port_state;
+	vport->fc_flag |= FC_PT2PT;
+	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3311 Rcv Flogi PS x%x new PS x%x "
+			 "fc_flag x%x new fc_flag x%x\n",
+			 port_state, vport->port_state,
+			 fc_flag, vport->fc_flag);
+
+	/*
+	 * We temporarily set fc_myDID to make it look like we are
+	 * a Fabric. This is done just so we end up with the right
+	 * did / sid on the FLOGI ACC rsp.
+	 */
+	did = vport->fc_myDID;
+	vport->fc_myDID = Fabric_DID;
+
+	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+	/* Send back ACC */
+	lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
+
+	/* Now let's put fc_myDID back to what it's supposed to be */
+	vport->fc_myDID = did;
+
+	return 0;
+}
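+
+/*
+ * Sketch of the point-to-point role election performed above: a WWPN is
+ * an 8-byte big-endian name, so a plain memcmp() doubles as a numeric
+ * comparison and the port with the larger name wins the right to send
+ * PLOGI:
+ *
+ *	rc = memcmp(&vport->fc_portname, &sp->portName,
+ *		    sizeof(struct lpfc_name));
+ *	if (rc > 0)        local WWPN higher: we send PLOGI
+ *		vport->fc_myDID = PT2PT_LocalID;
+ *	else if (rc < 0)   remote higher: wait for its PLOGI
+ *		vport->fc_myDID = PT2PT_RemoteID;
+ *	else               equal names: our own FLOGI looped back,
+ *		so reset the link (SLI3) or abort the FLOGI (SLI4)
+ */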
+
+/**
+ * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Request Node Identification Data (RNID) IOCB
+ * received as an ELS unsolicited event. Only when the RNID specifies format
+ * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) will
+ * this routine invoke the lpfc_els_rsp_rnid_acc() routine to Accept (ACC)
+ * the RNID ELS command. All other RNID formats are rejected by invoking
+ * the lpfc_els_rsp_reject() routine.
+ *
+ * Return code
+ *   0 - Successfully processed rnid iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	RNID *rn;
+	struct ls_rjt stat;
+	uint32_t cmd;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	rn = (RNID *) lp;
+
+	/* RNID received */
+
+	switch (rn->Format) {
+	case 0:
+	case RNID_TOPOLOGY_DISC:
+		/* Send back ACC */
+		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
+		break;
+	default:
+		/* Reject this request because format not supported */
+		stat.un.b.lsRjtRsvd0 = 0;
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+		stat.un.b.vendorUnique = 0;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	uint8_t *pcmd;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+	/* skip over first word of echo command to find echo data */
+	pcmd += sizeof(uint32_t);
+
+	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Link Incident Report Registration (LIRR) IOCB
+ * received as an ELS unsolicited event. Currently, this function just invokes
+ * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
+ *
+ * Return code
+ *   0 - Successfully processed lirr iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct ls_rjt stat;
+
+	/* For now, unconditionally reject this command */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
+ * received as an ELS unsolicited event. A request to RRQ shall only
+ * be accepted if the Originator Nx_Port N_Port_ID or the Responder
+ * Nx_Port N_Port_ID of the target Exchange is the same as the
+ * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
+ * not accepted, an LS_RJT with reason code "Unable to perform
+ * command request" and reason code explanation "Invalid Originator
+ * S_ID" shall be returned. For now, we just unconditionally accept
+ * RRQ from the target.
+ **/
+static void
+lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
+}
+
+/**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RLS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
+ * ACC response to the RLS.
+ *
+ * Note that, in the lpfc_prep_els_iocb() routine, the reference count of
+ * ndlp will be incremented by 1 for holding the ndlp and the reference to
+ * ndlp will be stored into the context1 field of the IOCB for the
+ * completion callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	struct RLS_RSP *rls_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint16_t oxid;
+	uint16_t rxid;
+	uint32_t cmdsize;
+
+	mb = &pmb->u.mb;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = rxid;
+	icmd->unsli3.rcvsli3.ox_id = oxid;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+	rls_rsp = (struct RLS_RSP *)pcmd;
+
+	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+}
+
+/**
+ * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	RPS_RSP *rps_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint16_t status;
+	uint16_t oxid;
+	uint16_t rxid;
+	uint32_t cmdsize;
+
+	mb = &pmb->u.mb;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = rxid;
+	icmd->unsli3.rcvsli3.ox_id = oxid;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+	rps_rsp = (RPS_RSP *)pcmd;
+
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+		status = 0x10;
+	else
+		status = 0x8;
+	if (phba->pport->fc_flag & FC_FABRIC)
+		status |= 0x4;
+
+	rps_rsp->rsvd1 = 0;
+	rps_rsp->portStatus = cpu_to_be16(status);
+	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+	/* The link statistics are copied out; pmb is no longer needed */
+	mempool_free(pmb, phba->mbox_mem_pool);
+	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+	return;
+}
+
+/**
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in the NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * to read the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ *   0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RLS request and be done with it */
+		goto reject_out;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+	if (mbox) {
+		lpfc_read_lnk_stat(phba, mbox);
+		mbox->context1 = (void *)((unsigned long)
+			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+			cmdiocb->iocb.ulpContext)); /* rx_id */
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+			!= MBX_NOT_FINISHED)
+			/* Mbox completion will send ELS Response */
+			return 0;
+		/* Decrement reference count used for the failed mbox
+		 * command.
+		 */
+		lpfc_nlp_put(ndlp);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
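+
+/*
+ * The mailbox queue element has no dedicated fields for the exchange
+ * IDs, so lpfc_els_rcv_rls() above smuggles both through the context1
+ * pointer; a sketch of the round trip (packed above, unpacked in
+ * lpfc_els_rsp_rls_acc()):
+ *
+ *	pack:   context1 = (void *)(unsigned long)
+ *			((ox_id << 16) | rx_id);
+ *	unpack: rxid = (uint16_t)((unsigned long)context1 & 0xffff);
+ *	        oxid = (uint16_t)(((unsigned long)context1 >> 16) & 0xffff);
+ *
+ * Both IDs are 16 bits wide, so the pair always fits in the low 32 bits
+ * of a pointer-sized integer.
+ */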
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in the NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to the Read
+ * Timeout Value (RTV) unsolicited IOCB event directly.
+ *
+ * Note that, in the lpfc_prep_els_iocb() routine, the reference count of
+ * ndlp will be incremented by 1 for holding the ndlp and the reference to
+ * ndlp will be stored into the context1 field of the IOCB for the
+ * completion callback function to the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ *   0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct ls_rjt stat;
+	struct RTV_RSP *rtv_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint32_t cmdsize;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RTV request and be done with it */
+		goto reject_out;
+
+	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+
+	/* use the command's xri in the response */
+	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
+	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+
+	rtv_rsp = (struct RTV_RSP *)pcmd;
+
+	/* populate RTV payload */
+	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+	/* Xmit ELS RTV ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+			 "Data: x%x x%x x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi,
+			 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+	return 0;
+
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
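+
+/*
+ * Unit sketch for the RTV payload built above (assuming fc_ratov is
+ * kept in seconds while the RTV ELS reports R_A_TOV in milliseconds,
+ * which is why the value is scaled on the way out):
+ *
+ *	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000);
+ *	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+ *
+ * The qtov word is assembled with bf_set() in CPU byte order and only
+ * swapped once at the end, which is why the final cpu_to_be32() assigns
+ * the field back to itself.
+ */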
+
+/**
+ * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Port Status (RPS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in the NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * to read the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RPS Accept (ACC) response.
+ *
+ * Return codes
+ *   0 - Successfully processed rps iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t *lp;
+	uint8_t flag;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *pcmd;
+	RPS *rps;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RPS request and done with it */
+		goto reject_out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	flag = (be32_to_cpu(*lp++) & 0xf);
+	rps = (RPS *) lp;
+
+	if ((flag == 0) ||
+	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
+	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
+				    sizeof(struct lpfc_name)) == 0))) {
+
+		printk("Fix me....\n");
+		dump_stack();
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+		if (mbox) {
+			lpfc_read_lnk_stat(phba, mbox);
+			mbox->context1 = (void *)((unsigned long)
+				((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+				cmdiocb->iocb.ulpContext)); /* rx_id */
+			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->vport = vport;
+			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+				!= MBX_NOT_FINISHED)
+				/* Mbox completion will send ELS Response */
+				return 0;
+			/* Decrement reference count used for the failed mbox
+			 * command.
+			 */
+			lpfc_nlp_put(ndlp);
+			mempool_free(mbox, phba->mbox_mem_pool);
+		}
+	}
+
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_issue_els_rrq - Issue an els rrq command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: DID of the target.
+ * @rrq: Pointer to the rrq struct.
+ *
+ * Build an ELS RRQ command and send it to the target. If the issue_iocb is
+ * successful, the completion handler will clear the RRQ.
+ *
+ * Return codes
+ *   0 - Successfully sent rrq els iocb.
+ *   1 - Failed to send rrq els iocb.
+ **/
+static int
+lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			uint32_t did, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct RRQ *els_rrq;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int ret;
+
+	if (ndlp != rrq->ndlp)
+		ndlp = rrq->ndlp;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return 1;
+
+	/* If ndlp is not NULL, we will bump the reference count on it */
+	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
+				     ELS_CMD_RRQ);
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For RRQ request, remainder of payload is Exchange IDs */
+	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
+	pcmd += sizeof(uint32_t);
+	els_rrq = (struct RRQ *) pcmd;
+
+	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
+	bf_set(rrq_rxid, els_rrq, rrq->rxid);
+	bf_set(rrq_did, els_rrq, vport->fc_myDID);
+	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
+	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue RRQ:     did:x%x",
+		did, rrq->xritag, rrq->rxid);
+	elsiocb->context_un.rrq = rrq;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
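+
+/*
+ * Payload sketch for the RRQ built above, per the RRQ ELS framing
+ * (the originator S_ID and the OX_ID/RX_ID pair follow the command
+ * word; the driver-side xritag is translated to the on-the-wire XRI
+ * through sli4_hba.xri_ids[]):
+ *
+ *	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
+ *	bf_set(rrq_rxid, els_rrq, rrq->rxid);
+ *	bf_set(rrq_did,  els_rrq, vport->fc_myDID);
+ *	els_rrq->rrq       = cpu_to_be32(els_rrq->rrq);
+ *	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+ *
+ * As with the RTV response, the bitfields are built in CPU order and
+ * swapped once per word at the end.
+ */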
+
+/**
+ * lpfc_send_rrq - Sends ELS RRQ if needed.
+ * @phba: pointer to lpfc hba data structure.
+ * @rrq: pointer to the active rrq.
+ *
+ * This routine will call the lpfc_issue_els_rrq if the rrq is
+ * still active for the xri. If this function returns a failure then
+ * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
+ *
+ * Returns 0 Success.
+ *         1 Failure.
+ **/
+int
+lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
+						       rrq->nlp_DID);
+	if (!ndlp)
+		return 1;
+
+	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
+		return lpfc_issue_els_rrq(rrq->vport, ndlp,
+					 rrq->nlp_DID, rrq);
+	else
+		return 1;
+}
+
+/**
+ * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdsize: size of the ELS command.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
+ * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPL Accept Response ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued ACC RPL ELS command
+ *   1 - Failed to issue ACC RPL ELS command
+ **/
+static int
+lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	IOCB_t *icmd, *oldcmd;
+	RPL_RSP rpl_rsp;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint16_t);
+	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
+	pcmd += sizeof(uint16_t);
+
+	/* Setup the RPL ACC payload */
+	rpl_rsp.listLen = be32_to_cpu(1);
+	rpl_rsp.index = 0;
+	rpl_rsp.port_num_blk.portNum = 0;
+	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
+	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
+	    sizeof(struct lpfc_name));
+	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
+	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0120 Xmit ELS RPL ACC response tag x%x "
+			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+			 "rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
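+
+/*
+ * Word-0 layout sketch for the RPL ACC built above (assuming a
+ * little-endian host, where ELS_CMD_ACC places the accept code in the
+ * first wire byte): the payload length lives in the low 16 bits of the
+ * same word, which is why pcmd advances by two 16-bit steps instead of
+ * one 32-bit step:
+ *
+ *	*((uint32_t *)pcmd) = ELS_CMD_ACC;           wire byte 0: ACC
+ *	pcmd += sizeof(uint16_t);
+ *	*((uint16_t *)pcmd) = be16_to_cpu(cmdsize);  wire bytes 2-3: len
+ *	pcmd += sizeof(uint16_t);                    word 1: RPL_RSP body
+ */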
+
+/**
+ * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port List (RPL) IOCB received as an ELS
+ * unsolicited event. It first checks the remote port state. If the remote
+ * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
+ * invokes the lpfc_els_rsp_reject() routine to send reject response.
+ * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
+ * to accept the RPL.
+ *
+ * Return code
+ *   0 - Successfully processed rpl iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	uint32_t maxsize;
+	uint16_t cmdsize;
+	RPL *rpl;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+		/* issue rejection response */
+		stat.un.b.lsRjtRsvd0 = 0;
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+		stat.un.b.vendorUnique = 0;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		/* rejected the unsolicited RPL request and done with it */
+		return 0;
+	}
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	rpl = (RPL *) (lp + 1);
+	maxsize = be32_to_cpu(rpl->maxsize);
+
+	/* We support only one port */
+	if ((rpl->index == 0) &&
+	    ((maxsize == 0) ||
+	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
+		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
+	} else {
+		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
+	}
+	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
+
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_farp - Process an unsolicited farp request els command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
+ * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
+ * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
+ * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
+ * remote PortName is compared against the FC PortName stored in the @vport
+ * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
+ * compared against the FC NodeName stored in the @vport data structure.
+ * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
+ * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
+ * invoked to send out FARP Response to the remote node. Before sending the
+ * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
+ * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
+ * routine is invoked to log into the remote port first.
+ *
+ * Return code
+ *   0 - Either the FARP Match Mode was not supported or the FARP was
+ *       successfully processed
+ **/
+static int
+lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	FARP *fp;
+	uint32_t cmd, cnt, did;
+
+	icmd = &cmdiocb->iocb;
+	did = icmd->un.elsreq64.remoteID;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	fp = (FARP *) lp;
+	/* FARP-REQ received from DID <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0601 FARP-REQ received from DID x%x\n", did);
+	/* We will only support match on WWPN or WWNN */
+	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT))
+		return 0;
+
+	cnt = 0;
+	/* If this FARP command is searching for my portname */
+	if (fp->Mflags & FARP_MATCH_PORT) {
+		if (memcmp(&fp->RportName, &vport->fc_portname,
+			   sizeof(struct lpfc_name)) == 0)
+			cnt = 1;
+	}
+
+	/* If this FARP command is searching for my nodename */
+	if (fp->Mflags & FARP_MATCH_NODE) {
+		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
+			   sizeof(struct lpfc_name)) == 0)
+			cnt = 1;
+	}
+
+	if (cnt) {
+		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+			/* Log back into the node before sending the FARP. */
+			if (fp->Rflags & FARP_REQUEST_PLOGI) {
+				ndlp->nlp_prev_state = ndlp->nlp_state;
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_PLOGI_ISSUE);
+				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+			}
+
+			/* Send a FARP response to that node */
+			if (fp->Rflags & FARP_REQUEST_FARPR)
+				lpfc_issue_els_farpr(vport, did, 0);
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
+ * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
+ * the FARP response request.
+ *
+ * Return code
+ *   0 - Successfully processed FARPR IOCB (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_nodelist  *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	uint32_t cmd, did;
+
+	icmd = &cmdiocb->iocb;
+	did = icmd->un.elsreq64.remoteID;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	/* FARP-RSP received from DID <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0600 FARP-RSP received from DID x%x\n", did);
+	/* ACCEPT the Farp resp request */
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @fan_ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fabric Address Notification (FAN) IOCB
+ * command received as an ELS unsolicited event. The FAN ELS command will
+ * only be processed on a physical port (i.e., the @vport represents the
+ * physical port). The fabric NodeName and PortName from the FAN IOCB are
+ * compared against those in the phba data structure. If any of those is
+ * different, the lpfc_initial_flogi() routine is invoked to initialize
+ * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
+ * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ *   0 - Successfully processed fan iocb (currently always return 0).
+ **/
+static int
+lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *fan_ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t *lp;
+	FAN *fp;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+	fp = (FAN *) ++lp;
+	/* FAN received; FAN does not have a reply sequence */
+	if ((vport == phba->pport) &&
+	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
+		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
+			    sizeof(struct lpfc_name))) ||
+		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
+			    sizeof(struct lpfc_name)))) {
+			/* This port has switched fabrics. FLOGI is required */
+			lpfc_issue_init_vfi(vport);
+		} else {
+			/* FAN verified - skip FLOGI */
+			vport->fc_myDID = vport->fc_prevDID;
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else {
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					"3138 Need register VFI: (x%x/%x)\n",
+					vport->fc_prevDID, vport->fc_myDID);
+				lpfc_issue_reg_vfi(vport);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_timeout - Handler function for the els timer
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the ELS timer after timeout. It posts the ELS
+ * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
+ * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
+ * up the worker thread. It is for the worker thread to invoke the routine
+ * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
+ **/
+void
+lpfc_els_timeout(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+		vport->work_port_events |= WORKER_ELS_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_els_timeout_handler - Process an els timeout event
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine is the actual handler function that processes an ELS timeout
+ * event. It walks the ELS ring to get and abort all the IOCBs (except the
+ * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
+ * invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
+void
+lpfc_els_timeout_handler(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	IOCB_t *cmd = NULL;
+	struct lpfc_dmabuf *pcmd;
+	uint32_t els_command = 0;
+	uint32_t timeout;
+	uint32_t remote_ID = 0xffffffff;
+	LIST_HEAD(abort_list);
+
+	timeout = (uint32_t)(phba->fc_ratov << 1);
+
+	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return;
+
+	if ((phba->pport->load_flag & FC_UNLOADING))
+		return;
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	if ((phba->pport->load_flag & FC_UNLOADING)) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			spin_unlock(&pring->ring_lock);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		cmd = &piocb->iocb;
+
+		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
+		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+
+		pcmd = (struct lpfc_dmabuf *) piocb->context2;
+		if (pcmd)
+			els_command = *(uint32_t *) (pcmd->virt);
+
+		if (els_command == ELS_CMD_FARP ||
+		    els_command == ELS_CMD_FARPR ||
+		    els_command == ELS_CMD_FDISC)
+			continue;
+
+		if (piocb->drvrTimeout > 0) {
+			if (piocb->drvrTimeout >= timeout)
+				piocb->drvrTimeout -= timeout;
+			else
+				piocb->drvrTimeout = 0;
+			continue;
+		}
+
+		remote_ID = 0xffffffff;
+		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
+			remote_ID = cmd->un.elsreq64.remoteID;
+		else {
+			struct lpfc_nodelist *ndlp;
+			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+				remote_ID = ndlp->nlp_DID;
+		}
+		list_add_tail(&piocb->dlist, &abort_list);
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		cmd = &piocb->iocb;
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "0127 ELS timeout Data: x%x x%x x%x "
+			 "x%x\n", els_command,
+			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&piocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	if (!list_empty(&pring->txcmplq))
+		if (!(phba->pport->load_flag & FC_UNLOADING))
+			mod_timer(&vport->els_tmofunc,
+				  jiffies + msecs_to_jiffies(1000 * timeout));
+}
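+
+/*
+ * Locking sketch for the collect-then-abort pattern above (the abort
+ * cannot be issued while walking txcmplq under the SLI4 ring_lock,
+ * because lpfc_sli_issue_abort_iotag() ultimately takes that lock
+ * itself):
+ *
+ *	phase 1, under hbalock (plus ring_lock on SLI4):
+ *		list_add_tail(&piocb->dlist, &abort_list);
+ *	phase 2, ring_lock dropped, hbalock re-taken per IOCB:
+ *		list_del_init(&piocb->dlist);
+ *		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ */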
+
+/**
+ * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with a non-NULL completion callback function, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
+ * callback function, the IOCB will simply be released. Finally, it walks
+ * the ELS transmit completion queue to issue an abort IOCB to any transmit
+ * completion queue IOCB that is associated with the @vport and is not
+ * an IOCB from libdfc (i.e., the management plane IOCBs that are not
+ * part of the discovery state machine) out to HBA by invoking the
+ * lpfc_sli_issue_abort_iotag() routine. Note that although this function
+ * issues an abort IOCB to each transmit completion queued IOCB, it does
+ * not guarantee that the IOCBs have actually been aborted by the time it
+ * returns.
+ **/
+void
+lpfc_els_flush_cmd(struct lpfc_vport *vport)
+{
+	LIST_HEAD(abort_list);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	IOCB_t *cmd = NULL;
+
+	lpfc_fabric_abort_vport(vport);
+	/*
+	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
+	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
+	 * ultimately grabs the ring_lock, the driver must splice the list into
+	 * a working list and release the locks before calling the abort.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	pring = lpfc_phba_elsring(phba);
+
+	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
+	if (unlikely(!pring)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+		list_add_tail(&piocb->dlist, &abort_list);
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+	/* Abort each iocb on the aborted list and remove the dlist links. */
+	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&piocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+	if (!list_empty(&abort_list))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "3387 abort list for txq not empty\n");
+	INIT_LIST_HEAD(&abort_list);
+
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+		cmd = &piocb->iocb;
+
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+			continue;
+
+		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
+		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
+		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+
+		list_del_init(&piocb->list);
+		list_add_tail(&piocb->list, &abort_list);
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &abort_list,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+
+	return;
+}
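+
+/*
+ * Disposition sketch for the flush above: commands already handed to
+ * the HBA (txcmplq) must be aborted by iotag so the hardware releases
+ * the exchange, while commands still queued in software (txq) can be
+ * completed locally without ever reaching the hardware:
+ *
+ *	txcmplq entry: lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+ *	txq entry:     lpfc_sli_cancel_iocbs(phba, &abort_list,
+ *				IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ */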
+
+/**
+ * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with the completion callback function associated, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
+ * callback function associated, the IOCB will simply be released. Finally,
+ * it walks the ELS transmit completion queue to issue an abort IOCB to any
+ * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
+ * management plane IOCBs that are not part of the discovery state machine)
+ * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
+void
+lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
+{
+	struct lpfc_vport *vport;
+	list_for_each_entry(vport, &phba->port_list, listentry)
+		lpfc_els_flush_cmd(vport);
+
+	return;
+}
+
+/**
+ * lpfc_send_els_failure_event - Posts an ELS command failure event
+ * @phba: Pointer to hba context object.
+ * @cmdiocbp: Pointer to command iocb which reported error.
+ * @rspiocbp: Pointer to response iocb which reported error.
+ *
+ * This function sends an event when there is an ELS command
+ * failure.
+ **/
+void
+lpfc_send_els_failure_event(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbp,
+			struct lpfc_iocbq *rspiocbp)
+{
+	struct lpfc_vport *vport = cmdiocbp->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_lsrjt_event lsrjt_event;
+	struct lpfc_fabric_event_header fabric_event;
+	struct ls_rjt stat;
+	struct lpfc_nodelist *ndlp;
+	uint32_t *pcmd;
+
+	ndlp = cmdiocbp->context1;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
+		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
+		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
+			sizeof(struct lpfc_name));
+		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
+			sizeof(struct lpfc_name));
+		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+			cmdiocbp->context2)->virt);
+		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
+		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
+		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(lsrjt_event),
+			(char *)&lsrjt_event,
+			LPFC_NL_VENDOR_ID);
+		return;
+	}
+	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
+		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+		fabric_event.event_type = FC_REG_FABRIC_EVENT;
+		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
+		else
+			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
+		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
+			sizeof(struct lpfc_name));
+		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
+			sizeof(struct lpfc_name));
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(fabric_event),
+			(char *)&fabric_event,
+			LPFC_NL_VENDOR_ID);
+		return;
+	}
+}
+
+/**
+ * lpfc_send_els_event - Posts unsolicited els event
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer FC node object.
+ * @payload: pointer to the ELS command payload.
+ *
+ * This function posts an event when there is an incoming
+ * unsolicited ELS command.
+ **/
+static void
+lpfc_send_els_event(struct lpfc_vport *vport,
+		    struct lpfc_nodelist *ndlp,
+		    uint32_t *payload)
+{
+	struct lpfc_els_event_header *els_data = NULL;
+	struct lpfc_logo_event *logo_data = NULL;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (*payload == ELS_CMD_LOGO) {
+		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
+		if (!logo_data) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0148 Failed to allocate memory "
+				"for LOGO event\n");
+			return;
+		}
+		els_data = &logo_data->header;
+	} else {
+		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
+			GFP_KERNEL);
+		if (!els_data) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0149 Failed to allocate memory "
+				"for ELS event\n");
+			return;
+		}
+	}
+	els_data->event_type = FC_REG_ELS_EVENT;
+	switch (*payload) {
+	case ELS_CMD_PLOGI:
+		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
+		break;
+	case ELS_CMD_PRLO:
+		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
+		break;
+	case ELS_CMD_ADISC:
+		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
+		break;
+	case ELS_CMD_LOGO:
+		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
+		/* Copy the WWPN in the LOGO payload */
+		memcpy(logo_data->logo_wwpn, &payload[2],
+			sizeof(struct lpfc_name));
+		break;
+	default:
+		kfree(els_data);
+		return;
+	}
+	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+	if (*payload == ELS_CMD_LOGO) {
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(struct lpfc_logo_event),
+			(char *)logo_data,
+			LPFC_NL_VENDOR_ID);
+		kfree(logo_data);
+	} else {
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(struct lpfc_els_event_header),
+			(char *)els_data,
+			LPFC_NL_VENDOR_ID);
+		kfree(els_data);
+	}
+
+	return;
+}
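+
+/*
+ * Editor's note -- illustrative sketch only. Both event paths above follow
+ * the same FC transport pattern: fill a driver-defined record whose first
+ * member is the common event header, then hand it to the transport as a
+ * vendor-unique event. Assuming a filled lpfc_els_event_header "hdr":
+ *
+ *	fc_host_post_vendor_event(shost, fc_get_event_number(),
+ *				  sizeof(hdr), (char *)&hdr,
+ *				  LPFC_NL_VENDOR_ID);
+ */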
+
+
+/**
+ * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine is used for processing the IOCB associated with an unsolicited
+ * event. It first determines whether there is an existing ndlp that matches
+ * the DID from the unsolicited IOCB. If not, it will create a new one with
+ * the DID from the unsolicited IOCB. The ELS command from the unsolicited
+ * IOCB is then used to invoke the proper routine and to set up proper state
+ * of the discovery state machine.
+ **/
+static void
+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_nodelist *ndlp;
+	struct ls_rjt stat;
+	uint32_t *payload;
+	uint32_t cmd, did, newnode;
+	uint8_t rjt_exp, rjt_err = 0;
+	IOCB_t *icmd = &elsiocb->iocb;
+
+	if (!vport || !(elsiocb->context2))
+		goto dropit;
+
+	newnode = 0;
+	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+	cmd = *payload;
+	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+		lpfc_post_buffer(phba, pring, 1);
+
+	did = icmd->un.rcvels.remoteID;
+	if (icmd->ulpStatus) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
+			icmd->ulpStatus, icmd->un.ulpWord[4], did);
+		goto dropit;
+	}
+
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport))
+		goto dropit;
+
+	/* Ignore traffic received during vport shutdown. */
+	if (vport->load_flag & FC_UNLOADING)
+		goto dropit;
+
+	/* If NPort discovery is delayed drop incoming ELS */
+	if ((vport->fc_flag & FC_DISC_DELAYED) &&
+			(cmd != ELS_CMD_PLOGI))
+		goto dropit;
+
+	ndlp = lpfc_findnode_did(vport, did);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = lpfc_nlp_init(vport, did);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+			ndlp->nlp_type |= NLP_FABRIC;
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp,
+					NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+			ndlp->nlp_type |= NLP_FABRIC;
+	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+		/* This is similar to the new node path */
+		ndlp = lpfc_nlp_get(ndlp);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+	}
+
+	phba->fc_stat.elsRcvFrame++;
+
+	/*
+	 * Do not process any unsolicited ELS commands
+	 * if the ndlp is in DEV_LOSS
+	 */
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
+		spin_unlock_irq(shost->host_lock);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		goto dropit;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	elsiocb->context1 = lpfc_nlp_get(ndlp);
+	elsiocb->vport = vport;
+
+	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+		cmd &= ELS_CMD_MASK;
+	}
+	/* ELS command <elsCmd> received from NPORT <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0112 ELS command x%x received from NPORT x%x "
+			 "Data: x%x x%x x%x x%x\n",
+			cmd, did, vport->port_state, vport->fc_flag,
+			vport->fc_myDID, vport->fc_prevDID);
+
+	/* Reject until our FLOGI completes */
+	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
+	    (cmd != ELS_CMD_FLOGI)) {
+		rjt_err = LSRJT_LOGICAL_BSY;
+		rjt_exp = LSEXP_NOTHING_MORE;
+		goto lsrjt;
+	}
+
+	switch (cmd) {
+	case ELS_CMD_PLOGI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPLOGI++;
+		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    (phba->pport->fc_flag & FC_PT2PT)) {
+			vport->fc_prevDID = vport->fc_myDID;
+			/* Our DID needs to be updated before registering
+			 * the vfi. This is done in lpfc_rcv_plogi but
+			 * that is called after the reg_vfi.
+			 */
+			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					 "3312 Remote port assigned DID x%x "
+					 "%x\n", vport->fc_myDID,
+					 vport->fc_prevDID);
+		}
+
+		lpfc_send_els_event(vport, ndlp, payload);
+
+		/* If Nport discovery is delayed, reject PLOGIs */
+		if (vport->fc_flag & FC_DISC_DELAYED) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			break;
+		}
+
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			if (!(phba->pport->fc_flag & FC_PT2PT) ||
+				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+				rjt_err = LSRJT_UNABLE_TPC;
+				rjt_exp = LSEXP_NOTHING_MORE;
+				break;
+			}
+		}
+
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_PLOGI);
+
+		break;
+	case ELS_CMD_FLOGI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFLOGI++;
+		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_LOGO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvLOGO++;
+		lpfc_send_els_event(vport, ndlp, payload);
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+		break;
+	case ELS_CMD_PRLO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPRLO++;
+		lpfc_send_els_event(vport, ndlp, payload);
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+		break;
+	case ELS_CMD_LCB:
+		phba->fc_stat.elsRcvLCB++;
+		lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_RDP:
+		phba->fc_stat.elsRcvRDP++;
+		lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_RSCN:
+		phba->fc_stat.elsRcvRSCN++;
+		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_ADISC:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		lpfc_send_els_event(vport, ndlp, payload);
+		phba->fc_stat.elsRcvADISC++;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_ADISC);
+		break;
+	case ELS_CMD_PDISC:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPDISC++;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_PDISC);
+		break;
+	case ELS_CMD_FARPR:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFARPR++;
+		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_FARP:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFARP++;
+		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_FAN:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFAN++;
+		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_PRLI:
+	case ELS_CMD_NVMEPRLI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPRLI++;
+		if ((vport->port_state < LPFC_DISC_AUTH) &&
+		    (vport->fc_flag & FC_FABRIC)) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			rjt_exp = LSEXP_NOTHING_MORE;
+			break;
+		}
+
+		/* NVMET accepts NVME PRLI only.  Reject FCP PRLI */
+		if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
+			rjt_err = LSRJT_CMD_UNSUPPORTED;
+			rjt_exp = LSEXP_REQ_UNSUPPORTED;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+		break;
+	case ELS_CMD_LIRR:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvLIRR++;
+		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RLS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRLS++;
+		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RPS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRPS++;
+		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RPL:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRPL++;
+		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RNID:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRNID++;
+		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RTV:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+		phba->fc_stat.elsRcvRTV++;
+		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RRQ:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRRQ++;
+		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_ECHO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvECHO++;
+		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_REC:
+		/* receive this due to exchange closed */
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_exp = LSEXP_INVALID_OX_RX;
+		break;
+	default:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
+			cmd, did, vport->port_state);
+
+		/* Unsupported ELS command, reject */
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
+		rjt_exp = LSEXP_NOTHING_MORE;
+
+		/* Unknown ELS command <elsCmd> received from NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0115 Unknown ELS command x%x "
+				 "received from NPORT x%x\n", cmd, did);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	}
+
+lsrjt:
+	/* Check whether we need to LS_RJT the received ELS cmd */
+	if (rjt_err) {
+		memset(&stat, 0, sizeof(stat));
+		stat.un.b.lsRjtRsnCode = rjt_err;
+		stat.un.b.lsRjtRsnCodeExp = rjt_exp;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
+			NULL);
+	}
+
+	lpfc_nlp_put(elsiocb->context1);
+	elsiocb->context1 = NULL;
+	return;
+
+dropit:
+	if (vport && !(vport->load_flag & FC_UNLOADING))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0111 Dropping received ELS cmd "
+			"Data: x%x x%x x%x\n",
+			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+	phba->fc_stat.elsRcvDrop++;
+}
+
+/**
+ * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @elsiocb: pointer to lpfc els iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
+ * SLI ring on which the unsolicited event was received.
+ **/
+void
+lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		     struct lpfc_iocbq *elsiocb)
+{
+	struct lpfc_vport *vport = phba->pport;
+	IOCB_t *icmd = &elsiocb->iocb;
+	dma_addr_t paddr;
+	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
+	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+
+	elsiocb->context1 = NULL;
+	elsiocb->context2 = NULL;
+	elsiocb->context3 = NULL;
+
+	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
+		   (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+		   IOERR_RCV_BUFFER_WAITING) {
+		phba->fc_stat.NoRcvBuf++;
+		/* Not enough posted buffers; Try posting more buffers */
+		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+			lpfc_post_buffer(phba, pring, 0);
+		return;
+	}
+
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+			vport = phba->pport;
+		else
+			vport = lpfc_find_vport_by_vpid(phba,
+						icmd->unsli3.rcvsli3.vpi);
+	}
+
+	/* If there are no BDEs associated
+	 * with this IOCB, there is nothing to do.
+	 */
+	if (icmd->ulpBdeCount == 0)
+		return;
+
+	/* type of ELS cmd is first 32bit word
+	 * in packet
+	 */
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		elsiocb->context2 = bdeBuf1;
+	} else {
+		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
+				 icmd->un.cont64[0].addrLow);
+		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+							     paddr);
+	}
+
+	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+	/*
+	 * The different unsolicited event handlers would tell us
+	 * if they are done with "mp" by setting context2 to NULL.
+	 */
+	if (elsiocb->context2) {
+		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
+		elsiocb->context2 = NULL;
+	}
+
+	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
+	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
+	    icmd->ulpBdeCount == 2) {
+		elsiocb->context2 = bdeBuf2;
+		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+		/* free mp if we are done with it */
+		if (elsiocb->context2) {
+			lpfc_in_buf_free(phba, elsiocb->context2);
+			elsiocb->context2 = NULL;
+		}
+	}
+}
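+
+/*
+ * Editor's note -- illustrative sketch only. For non-HBQ (SLI-3 without
+ * host buffer queues) rings, the receive buffer is located by rebuilding
+ * the 64-bit DMA address from the two 32-bit halves carried in the IOCB;
+ * getPaddr() is essentially:
+ *
+ *	paddr = ((dma_addr_t)addrHigh << 32) | addrLow;
+ */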
+
+static void
+lpfc_start_fdmi(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	/* If this is the first time, allocate an ndlp and initialize
+	 * it. Otherwise, make sure the node is enabled and then do the
+	 * login.
+	 */
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (!ndlp) {
+		ndlp = lpfc_nlp_init(vport, FDMI_DID);
+		if (ndlp) {
+			ndlp->nlp_type |= NLP_FABRIC;
+		} else {
+			return;
+		}
+	}
+	if (!NLP_CHK_NODE_ACT(ndlp))
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+
+	if (ndlp) {
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	}
+}
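+
+/*
+ * Editor's note -- illustrative sketch only. lpfc_start_fdmi() above is one
+ * instance of a recurring node-lookup idiom in this file: find the ndlp by
+ * DID, allocate it on first use, and re-enable it if it exists but is
+ * inactive:
+ *
+ *	ndlp = lpfc_findnode_did(vport, did);
+ *	if (!ndlp)
+ *		ndlp = lpfc_nlp_init(vport, did);
+ *	else if (!NLP_CHK_NODE_ACT(ndlp))
+ *		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+ */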
+
+/**
+ * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine issues a Port Login (PLOGI) to the Name Server with
+ * State Change Request (SCR) for a @vport. This routine will create an
+ * ndlp for the Name Server associated to the @vport if such node does
+ * not already exist. The PLOGI to Name Server is issued by invoking the
+ * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
+ * (FDMI) is configured to the @vport, a FDMI node will be created and
+ * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
+ **/
+void
+lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/*
+	 * If the lpfc_delay_discovery parameter is set, the clean address
+	 * bit is cleared, and the fc fabric parameters changed, delay FC NPort
+	 * discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_flag & FC_DISC_DELAYED) {
+		spin_unlock_irq(shost->host_lock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"3334 Delay fc port discovery for %d seconds\n",
+				phba->fc_ratov);
+		mod_timer(&vport->delayed_disc_tmo,
+			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+		return;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (!ndlp) {
+		ndlp = lpfc_nlp_init(vport, NameServer_DID);
+		if (!ndlp) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				lpfc_disc_start(vport);
+				return;
+			}
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0251 NameServer login: no memory\n");
+			return;
+		}
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				lpfc_disc_start(vport);
+				return;
+			}
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					"0348 NameServer login: node freed\n");
+			return;
+		}
+	}
+	ndlp->nlp_type |= NLP_FABRIC;
+
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0252 Cannot issue NameServer login\n");
+		return;
+	}
+
+	if ((phba->cfg_enable_SmartSAN ||
+	     (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
+	     (vport->load_flag & FC_ALLOW_FDMI))
+		lpfc_start_fdmi(vport);
+}
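+
+/*
+ * Editor's note -- illustrative sketch only. The discovery delay above is a
+ * plain kernel timer armed for one R_A_TOV interval; phba->fc_ratov is in
+ * seconds (see the "3334" log message), so it is scaled to jiffies as:
+ *
+ *	mod_timer(&vport->delayed_disc_tmo,
+ *		  jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+ */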
+
+/**
+ * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function to register new vport
+ * mailbox command. If the new vport mailbox command completes successfully,
+ * the fabric registration login shall be performed on physical port (the
+ * new vport created is actually a physical port, with VPI 0) or the port
+ * login to Name Server for State Change Request (SCR) will be performed
+ * on virtual port (real virtual port, with VPI greater than 0).
+ **/
+static void
+lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	MAILBOX_t *mb = &pmb->u.mb;
+	int rc;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	if (mb->mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				"0915 Register VPI failed: Status: x%x"
+				" upd bit: x%x\n", mb->mbxStatus,
+				 mb->un.varRegVpi.upd);
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+			mb->un.varRegVpi.upd)
+			goto mbox_err_exit;
+
+		switch (mb->mbxStatus) {
+		case 0x11:	/* unsupported feature */
+		case 0x9603:	/* max_vpi exceeded */
+		case 0x9602:	/* Link event since CLEAR_LA */
+			/* giving up on vport registration */
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+			spin_unlock_irq(shost->host_lock);
+			lpfc_can_disctmo(vport);
+			break;
+		/* If reg_vpi fail with invalid VPI status, re-init VPI */
+		case 0x20:
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_init_vpi(phba, pmb, vport->vpi);
+			pmb->vport = vport;
+			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
+			rc = lpfc_sli_issue_mbox(phba, pmb,
+				MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				lpfc_printf_vlog(vport,
+					KERN_ERR, LOG_MBOX,
+					"2732 Failed to issue INIT_VPI"
+					" mailbox command\n");
+			} else {
+				lpfc_nlp_put(ndlp);
+				return;
+			}
+
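+			/* If the INIT_VPI mailbox could not be issued we
+			 * simply fall through to the default recovery path.
+			 */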
+		default:
+			/* Try to recover from this error */
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vport);
+			lpfc_mbx_unreg_vpi(vport);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			spin_unlock_irq(shost->host_lock);
+			if (mb->mbxStatus == MBX_NOT_FINISHED)
+				break;
+			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_issue_init_vfi(vport);
+				else
+					lpfc_initial_flogi(vport);
+			} else {
+				lpfc_initial_fdisc(vport);
+			}
+			break;
+		}
+	} else {
+		spin_lock_irq(shost->host_lock);
+		vport->vpi_state |= LPFC_VPI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+		if (vport == phba->pport) {
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else {
+				/*
+				 * If the physical port is instantiated using
+				 * FDISC, do not start vport discovery.
+				 */
+				if (vport->port_state != LPFC_FDISC)
+					lpfc_start_fdiscs(phba);
+				lpfc_do_scr_ns_plogi(phba, vport);
+			}
+		} else
+			lpfc_do_scr_ns_plogi(phba, vport);
+	}
+mbox_err_exit:
+	/* Now, we decrement the ndlp reference count held for this
+	 * callback function
+	 */
+	lpfc_nlp_put(ndlp);
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_register_new_vport - Register a new vport with a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine registers the @vport as a new virtual port with a HBA.
+ * It is done through a registering vpi mailbox command.
+ **/
+void
+lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	LPFC_MBOXQ_t *mbox;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_reg_vpi(vport, mbox);
+		mbox->vport = vport;
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+		    == MBX_NOT_FINISHED) {
+			/* mailbox command was not successful; decrement
+			 * the ndlp reference count for this command
+			 */
+			lpfc_nlp_put(ndlp);
+			mempool_free(mbox, phba->mbox_mem_pool);
+
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				"0253 Register VPI: Can't send mbox\n");
+			goto mbox_err_exit;
+		}
+	} else {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0254 Register VPI: no memory\n");
+		goto mbox_err_exit;
+	}
+	return;
+
+mbox_err_exit:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+	return;
+}
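+
+/*
+ * Editor's note -- illustrative sketch only. The mailbox path above keeps
+ * the ndlp reference count balanced: a reference is taken when the command
+ * is queued and released either by the completion handler or on a failed
+ * submit. The shape, with error logging elided:
+ *
+ *	mbox->context2 = lpfc_nlp_get(ndlp);
+ *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
+ *		lpfc_nlp_put(ndlp);
+ *		mempool_free(mbox, phba->mbox_mem_pool);
+ *	}
+ */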
+
+/**
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine cancels the retry delay timers to all the vports.
+ **/
+void
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	uint32_t link_state;
+	int i;
+
+	/* Treat this failure as linkdown for all vports */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	vports = lpfc_create_vport_work_array(phba);
+
+	if (vports) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+			if (ndlp)
+				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+			lpfc_els_flush_cmd(vports[i]);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and
+ * starts a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+
+	/* Cancel all the vports' retry delay timers */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+	/* If the fabric requires FLOGI, re-instantiate the physical login */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (!ndlp)
+		return;
+
+	shost = lpfc_shost_from_vport(phba->pport);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+	phba->pport->port_state = LPFC_FLOGI;
+	return;
+}
+
+/**
+ * lpfc_fabric_login_reqd - Check if FLOGI required.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to FDISC command iocb.
+ * @rspiocb: pointer to FDISC response iocb.
+ *
+ * This routine checks if a FLOGI is required for FDISC
+ * to succeed.
+ **/
+static int
+lpfc_fabric_login_reqd(struct lpfc_hba *phba,
+		struct lpfc_iocbq *cmdiocb,
+		struct lpfc_iocbq *rspiocb)
+{
+	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
+		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+		return 0;
+	else
+		return 1;
+}
+
+/**
+ * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to a Fabric Discover
+ * (FDISC) ELS command. Since all the FDISC ELS commands are issued
+ * single threaded, each FDISC completion callback function will reset
+ * the discovery timer for all vports such that the timers will not time out
+ * unnecessarily. The function checks the FDISC IOCB status. If an error is
+ * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the
+ * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID
+ * assigned to the vport has been changed with the completion of the FDISC
+ * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
+ * are unregistered from the HBA, and then the lpfc_register_new_vport()
+ * routine is invoked to register new vport with the HBA. Otherwise, the
+ * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
+ * Server for State Change Request (SCR).
+ **/
+static void
+lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_nodelist *np;
+	struct lpfc_nodelist *next_np;
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_iocbq *piocb;
+	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+	struct serv_parm *sp;
+	uint8_t fabric_param_changed;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
+			 irsp->ulpStatus, irsp->un.ulpWord[4],
+			 vport->fc_prevDID);
+	/* Since all FDISCs are being single threaded, we
+	 * must reset the discovery timer for ALL vports
+	 * waiting to send FDISC when one completes.
+	 */
+	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
+		lpfc_set_disctmo(piocb->vport);
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+	if (irsp->ulpStatus) {
+
+		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
+			lpfc_retry_pport_discovery(phba);
+			goto out;
+		}
+
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+			goto out;
+		/* FDISC failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0126 FDISC failed. (x%x/x%x)\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4]);
+		goto fdisc_failed;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
+	vport->fc_flag |= FC_FABRIC;
+	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+		vport->fc_flag |=  FC_PUBLIC_LOOP;
+	spin_unlock_irq(shost->host_lock);
+
+	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
+	sp = prsp->virt + sizeof(uint32_t);
+	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+	memcpy(&vport->fabric_portname, &sp->portName,
+		sizeof(struct lpfc_name));
+	memcpy(&vport->fabric_nodename, &sp->nodeName,
+		sizeof(struct lpfc_name));
+	if (fabric_param_changed &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+		/* If our NportID changed, we need to ensure all
+		 * remaining NPORTs get unreg_login'ed so we can
+		 * issue unreg_vpi.
+		 */
+		list_for_each_entry_safe(np, next_np,
+			&vport->fc_nodes, nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(np) ||
+			    (np->nlp_state != NLP_STE_NPR_NODE) ||
+			    !(np->nlp_flag & NLP_NPR_ADISC))
+				continue;
+			spin_lock_irq(shost->host_lock);
+			np->nlp_flag &= ~NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_unreg_rpi(vport, np);
+		}
+		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+
+		lpfc_mbx_unreg_vpi(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		else
+			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
+		spin_unlock_irq(shost->host_lock);
+	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+		/*
+		 * Driver needs to re-reg VPI in order for f/w
+		 * to update the MAC address.
+		 */
+		lpfc_register_new_vport(phba, vport, ndlp);
+		goto out;
+	}
+
+	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+		lpfc_issue_init_vpi(vport);
+	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+		lpfc_register_new_vport(phba, vport, ndlp);
+	else
+		lpfc_do_scr_ns_plogi(phba, vport);
+	goto out;
+fdisc_failed:
+	if (vport->fc_vport &&
+	    (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	/* Cancel discovery timer */
+	lpfc_can_disctmo(vport);
+	lpfc_nlp_put(ndlp);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_issue_els_fdisc - Issue a fdisc iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
+ * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
+ * routine to issue the IOCB, which makes sure only one outstanding fabric
+ * IOCB will be sent off HBA at any given time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FDISC ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued fdisc iocb command
+ *   1 - Failed to issue fdisc iocb command
+ **/
+static int
+lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     uint8_t retry)
+{
+	struct lpfc_hba *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	struct serv_parm *sp;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int did = ndlp->nlp_DID;
+	int rc;
+
+	vport->port_state = LPFC_FDISC;
+	vport->fc_myDID = 0;
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+				     ELS_CMD_FDISC);
+	if (!elsiocb) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0255 Issue FDISC: no IOCB\n");
+		return 1;
+	}
+
+	icmd = &elsiocb->iocb;
+	icmd->un.elsreq64.myID = 0;
+	icmd->un.elsreq64.fl = 1;
+
+	/*
+	 * SLI3 ports require a different context type value than SLI4.
+	 * Catch SLI3 ports here and override the prep.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV3) {
+		icmd->ulpCt_h = 1;
+		icmd->ulpCt_l = 0;
+	}
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
+	pcmd += sizeof(uint32_t); /* CSP Word 1 */
+	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
+	sp = (struct serv_parm *) pcmd;
+	/* Setup CSPs accordingly for Fabric */
+	sp->cmn.e_d_tov = 0;
+	sp->cmn.w2.r_a_tov = 0;
+	sp->cmn.virtual_fabric_support = 0;
+	sp->cls1.classValid = 0;
+	sp->cls2.seqDelivery = 1;
+	sp->cls3.seqDelivery = 1;
+
+	pcmd += sizeof(uint32_t); /* CSP Word 2 */
+	pcmd += sizeof(uint32_t); /* CSP Word 3 */
+	pcmd += sizeof(uint32_t); /* CSP Word 4 */
+	pcmd += sizeof(uint32_t); /* Port Name */
+	memcpy(pcmd, &vport->fc_portname, 8);
+	pcmd += sizeof(uint32_t); /* Node Name */
+	pcmd += sizeof(uint32_t); /* Node Name */
+	memcpy(pcmd, &vport->fc_nodename, 8);
+	sp->cmn.valid_vendor_ver_level = 0;
+	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
+	lpfc_set_disctmo(vport);
+
+	phba->fc_stat.elsXmitFDISC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FDISC:     did:x%x",
+		did, 0, 0);
+
+	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0256 Issue FDISC: Cannot send IOCB\n");
+		return 1;
+	}
+	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the issuing of a LOGO
+ * ELS command off a vport. It frees the command IOCB and then decrements the
+ * reference count held on ndlp for this completion function, indicating that
+ * the reference to the ndlp is no longer needed. Note that the
+ * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
+ * callback function, and an additional explicit ndlp reference decrement
+ * will trigger the actual release of the ndlp.
+ **/
+static void
+lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+
+	lpfc_els_free_iocb(phba, cmdiocb);
+	vport->unreg_vpi_cmpl = VPORT_ERROR;
+
+	/* Trigger the release of the ndlp after logo */
+	lpfc_nlp_put(ndlp);
+
+	/* NPIV LOGO completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2928 NPIV LOGO completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, vport->num_disc_nodes);
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NDISC_ACTIVE;
+		vport->fc_flag &= ~FC_FABRIC;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_can_disctmo(vport);
+	}
+}
+
+/**
+ * lpfc_issue_els_npiv_logo - Issue a logo off a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a LOGO ELS command to an @ndlp off a @vport.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return codes
+ *   0 - Successfully issued logo off the @vport
+ *   1 - Failed to issue logo off the @vport
+ **/
+int
+lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+
+	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
+				     ELS_CMD_LOGO);
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in LOGO payload */
+	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue LOGO npiv  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_SND;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_LOGO_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_fabric_block_timeout - Handler function to the fabric block timer
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the fabric iocb block timer after
+ * timeout. It posts the fabric iocb block timeout event by setting the
+ * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
+ * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
+ * thread then invokes lpfc_unblock_fabric_iocbs() to handle the posted
+ * WORKER_FABRIC_BLOCK_TMO event.
+ **/
+void
+lpfc_fabric_block_timeout(unsigned long ptr)
+{
+	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
+	unsigned long iflags;
+	uint32_t tmo_posted;
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
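+
+/*
+ * Editor's note -- illustrative sketch only. The timer handler above uses a
+ * common "post under lock, wake outside the lock" shape so the wakeup is
+ * issued at most once per posted event:
+ *
+ *	spin_lock_irqsave(&lock, iflags);
+ *	already_posted = events & EVENT_BIT;
+ *	events |= EVENT_BIT;
+ *	spin_unlock_irqrestore(&lock, iflags);
+ *	if (!already_posted)
+ *		lpfc_worker_wake_up(phba);
+ */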
+
+/**
+ * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues one fabric iocb from the driver internal list to
+ * the HBA. It first checks whether it's ready to issue one fabric iocb to
+ * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it shall
+ * remove one pending fabric iocb from the driver internal list and invokes
+ * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
+ **/
+static void
+lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *iocb;
+	unsigned long iflags;
+	int ret;
+	IOCB_t *cmd;
+
+repeat:
+	iocb = NULL;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Post any pending iocb to the SLI layer */
+	if (atomic_read(&phba->fabric_iocb_count) == 0) {
+		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
+				 list);
+		if (iocb)
+			/* Increment fabric iocb count to hold the position */
+			atomic_inc(&phba->fabric_iocb_count);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (iocb) {
+		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+		iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+			"Fabric sched1:   ste:x%x",
+			iocb->vport->port_state, 0, 0);
+
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
+
+		if (ret == IOCB_ERROR) {
+			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+			iocb->fabric_iocb_cmpl = NULL;
+			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+			cmd = &iocb->iocb;
+			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+			iocb->iocb_cmpl(phba, iocb, iocb);
+
+			atomic_dec(&phba->fabric_iocb_count);
+			goto repeat;
+		}
+	}
+
+	return;
+}
+
+/**
+ * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine unblocks the issuing of fabric iocb commands. The function
+ * will clear the fabric iocb block bit and then invoke the routine
+ * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
+ * from the driver internal fabric iocb list.
+ **/
+void
+lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
+{
+	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+	lpfc_resume_fabric_iocbs(phba);
+	return;
+}
+
+/**
+ * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine blocks the issuing fabric iocb for a specified amount of
+ * time (currently 100 ms). This is done by setting the fabric iocb block bit
+ * and setting up a timeout timer for 100 ms. While the block bit is set, no
+ * more fabric iocbs will be issued to the HBA.
+ **/
+static void
+lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
+{
+	int blocked;
+
+	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+	/* Start a timer to unblock fabric iocbs after 100ms */
+	if (!blocked)
+		mod_timer(&phba->fabric_block_timer,
+			  jiffies + msecs_to_jiffies(100));
+
+	return;
+}
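+
+/*
+ * Editor's note -- illustrative sketch only. test_and_set_bit() above makes
+ * blocking idempotent: only the caller that actually flips the bit arms the
+ * 100 ms unblock timer, so back-to-back busy responses do not keep pushing
+ * the deadline out:
+ *
+ *	if (!test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags))
+ *		mod_timer(&phba->fabric_block_timer,
+ *			  jiffies + msecs_to_jiffies(100));
+ */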
+
+/**
+ * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the callback function that is put to the fabric iocb's
+ * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * function first restores and invokes the original iocb's callback function
+ * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
+ * fabric bound iocb from the driver internal fabric iocb list onto the wire.
+ **/
+static void
+lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+	struct lpfc_iocbq *rspiocb)
+{
+	struct ls_rjt stat;
+
+	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
+
+	switch (rspiocb->iocb.ulpStatus) {
+		case IOSTAT_NPORT_RJT:
+		case IOSTAT_FABRIC_RJT:
+			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+				lpfc_block_fabric_iocbs(phba);
+			}
+			break;
+
+		case IOSTAT_NPORT_BSY:
+		case IOSTAT_FABRIC_BSY:
+			lpfc_block_fabric_iocbs(phba);
+			break;
+
+		case IOSTAT_LS_RJT:
+			stat.un.lsRjtError =
+				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
+				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
+				lpfc_block_fabric_iocbs(phba);
+			break;
+	}
+
+	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
+
+	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
+	cmdiocb->fabric_iocb_cmpl = NULL;
+	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+
+	atomic_dec(&phba->fabric_iocb_count);
+	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
+		/* Post any pending iocbs to HBA */
+		lpfc_resume_fabric_iocbs(phba);
+	}
+}
+
+/**
+ * lpfc_issue_fabric_iocb - Issue a fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ * @iocb: pointer to lpfc command iocb data structure.
+ *
+ * This routine is used as the top-level API for issuing a fabric iocb command
+ * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
+ * function makes sure that only one fabric bound iocb will be outstanding at
+ * any given time. As such, this function will first check to see whether there
+ * is already an outstanding fabric iocb on the wire. If so, it will put the
+ * newly issued iocb onto the driver internal fabric iocb list, waiting to be
+ * issued later. Otherwise, it will issue the iocb on the wire and update the
+ * fabric iocb count to indicate that there is one fabric iocb on the wire.
+ *
+ * Note: this implementation can potentially send fabric IOCBs out of
+ * order. The problem is that the construction of the "ready" boolean does
+ * not include the condition that the internal fabric IOCB list is empty. As
+ * such, a fabric IOCB issued by this routine might jump ahead of the
+ * fabric IOCBs already in the internal list.
+ *
+ * Return code
+ *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
+ *   IOCB_ERROR - failed to issue fabric iocb
+ **/
+static int
+lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
+{
+	unsigned long iflags;
+	int ready;
+	int ret;
+
+	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
+		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+	if (ready)
+		/* Increment fabric iocb count to hold the position */
+		atomic_inc(&phba->fabric_iocb_count);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (ready) {
+		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+		iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+			"Fabric sched2:   ste:x%x",
+			iocb->vport->port_state, 0, 0);
+
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
+
+		if (ret == IOCB_ERROR) {
+			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+			iocb->fabric_iocb_cmpl = NULL;
+			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+			atomic_dec(&phba->fabric_iocb_count);
+		}
+	} else {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		ret = IOCB_SUCCESS;
+	}
+	return ret;
+}
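+
+/*
+ * Editor's note -- illustrative sketch only. The "one fabric iocb on the
+ * wire" guarantee comes from reserving the slot while holding hbalock and
+ * releasing it on completion or on a failed issue:
+ *
+ *	spin_lock_irqsave(&phba->hbalock, iflags);
+ *	ready = atomic_read(&phba->fabric_iocb_count) == 0 && !blocked;
+ *	if (ready)
+ *		atomic_inc(&phba->fabric_iocb_count);
+ *	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ */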
+
+/**
+ * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine aborts all the IOCBs associated with a @vport from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @vport off the list, sets the
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+				 list) {
+
+		if (piocb->vport != vport)
+			continue;
+
+		list_move_tail(&piocb->list, &completions);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine aborts all the IOCBs associated with an @ndlp from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @ndlp off the list, sets the
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba  *phba = ndlp->phba;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	struct lpfc_sli_ring *pring;
+
+	pring = lpfc_phba_elsring(phba);
+
+	if (unlikely(!pring))
+		return;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+				 list) {
+		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
+
+			list_move_tail(&piocb->list, &completions);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the IOCBs currently on the driver internal
+ * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
+ * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
+ * list, removes IOCBs off the list, sets the status field to
+ * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
+ * the IOCB.
+ **/
+void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&phba->fabric_iocb_list, &completions);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
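+
+/*
+ * Editor's note -- illustrative sketch only. All three abort helpers above
+ * share one pattern: move the victims to a private list under hbalock, then
+ * complete them with a reject status after the lock is dropped, so the
+ * callbacks never run with hbalock held:
+ *
+ *	LIST_HEAD(completions);
+ *
+ *	spin_lock_irq(&phba->hbalock);
+ *	list_splice_init(&phba->fabric_iocb_list, &completions);
+ *	spin_unlock_irq(&phba->hbalock);
+ *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ *			      IOERR_SLI_ABORTED);
+ */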
+
+/**
+ * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+			sglq_entry->ndlp = NULL;
+	}
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	uint16_t lxri = 0;
+
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_sli_ring *pring;
+
+	pring = lpfc_phba_elsring(phba);
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->sli4_xritag == xri) {
+			list_del(&sglq_entry->list);
+			ndlp = sglq_entry->ndlp;
+			sglq_entry->ndlp = NULL;
+			list_add_tail(&sglq_entry->list,
+				&phba->sli4_hba.lpfc_els_sgl_list);
+			sglq_entry->state = SGL_FREED;
+			spin_unlock(&phba->sli4_hba.sgl_list_lock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			lpfc_set_rrq_active(phba, ndlp,
+				sglq_entry->sli4_lxritag,
+				rxid, 1);
+
+			/* Check if TXQ queue needs to be serviced */
+			if (pring && !list_empty(&pring->txq))
+				lpfc_worker_wake_up(phba);
+			return;
+		}
+	}
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	lxri = lpfc_sli4_xri_inrange(phba, xri);
+	if (lxri == NO_XRI) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
+	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted node.
+ *
+ * The driver calls this routine in response to an SLI4 XRI ABORT CQE
+ * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
+ * the driver is required to send a LOGO to the remote node before it
+ * attempts to recover its login to the remote node.
+ */
+void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+			   struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+	unsigned long flags = 0;
+
+	shost = lpfc_shost_from_vport(vport);
+	phba = vport->phba;
+	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+		lpfc_printf_log(phba, KERN_INFO,
+				LOG_SLI, "3093 No rport recovery needed. "
+				"rport in state 0x%x\n", ndlp->nlp_state);
+		return;
+	}
+	lpfc_printf_log(phba, KERN_ERR,
+			LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
+			"3094 Start rport recovery on shost id 0x%x "
+			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+			"flags 0x%x\n",
+			shost->host_no, ndlp->nlp_DID,
+			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+			ndlp->nlp_flag);
+	/*
+	 * The rport is not responding.  Remove the FCP-2 flag to prevent
+	 * an ADISC in the follow-up recovery code.
+	 */
+	spin_lock_irqsave(shost->host_lock, flags);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	lpfc_unreg_rpi(vport, ndlp);
+}
+
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hbadisc.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hbadisc.c
new file mode 100644
index 0000000..4a0889d
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -0,0 +1,6739 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/lockdep.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* AlpaArray for assignment of scsid for scan-down and bind_method */
+static uint8_t lpfcAlpaArray[] = {
+	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
+	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
+	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
+	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
+	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
+	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
+	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
+	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
+	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
+	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
+	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
+	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
+	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
+};
+
+static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+static void lpfc_disc_flush_list(struct lpfc_vport *vport);
+static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
+
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_hba *phba;
+
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+			printk(KERN_ERR "Cannot find remote node"
+			" to terminate I/O Data x%x\n",
+			rport->port_id);
+		return;
+	}
+
+	phba  = ndlp->phba;
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+		"rport terminate: sid:x%x did:x%x flg:x%x",
+		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+	if (ndlp->nlp_sid != NLP_NO_SID) {
+		lpfc_sli_abort_iocb(ndlp->vport,
+			&phba->sli.sli3_ring[LPFC_FCP_RING],
+			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+	}
+}
+
+/*
+ * This function will be called when dev_loss_tmo fires.
+ */
+void
+lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_vport *vport;
+	struct Scsi_Host *shost;
+	struct lpfc_hba   *phba;
+	struct lpfc_work_evt *evtp;
+	int  put_node;
+	int  put_rport;
+
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	vport = ndlp->vport;
+	phba  = vport->phba;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport devlosscb: sid:x%x did:x%x flg:x%x",
+		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
+	/* Don't defer this if we are in the process of deleting the vport
+	 * or unloading the driver. The unload will clean up the node
+	 * appropriately; we just need to clean up the ndlp rport info here.
+	 */
+	if (vport->load_flag & FC_UNLOADING) {
+		put_node = rdata->pnode != NULL;
+		put_rport = ndlp->rport != NULL;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		if (put_node)
+			lpfc_nlp_put(ndlp);
+		if (put_rport)
+			put_device(&rport->dev);
+		return;
+	}
+
+	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+		return;
+
+	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+				"6789 rport name %llx != node port name %llx",
+				rport->port_name,
+				wwn_to_u64(ndlp->nlp_portname.u.wwn));
+
+	evtp = &ndlp->dev_loss_evt;
+
+	if (!list_empty(&evtp->evt_listp)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+				"6790 rport name %llx dev_loss_evt pending",
+				rport->port_name);
+		return;
+	}
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
+	spin_unlock_irq(shost->host_lock);
+
+	/* We need to hold the node by incrementing the reference
+	 * count until this queued work is done
+	 */
+	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+
+	spin_lock_irq(&phba->hbalock);
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine returns 1 when at least one
+ * remote node, including this @ndlp, is still using the FCF; otherwise, it
+ * returns 0 when no remote node is still using the FCF at the time the
+ * devloss timeout fired for this @ndlp.
+ **/
+static int
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_rport_data *rdata;
+	struct fc_rport   *rport;
+	struct lpfc_vport *vport;
+	struct lpfc_hba   *phba;
+	struct Scsi_Host  *shost;
+	uint8_t *name;
+	int  put_node;
+	int warn_on = 0;
+	int fcf_inuse = 0;
+
+	rport = ndlp->rport;
+	vport = ndlp->vport;
+	shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+	spin_unlock_irq(shost->host_lock);
+
+	if (!rport)
+		return fcf_inuse;
+
+	name = (uint8_t *) &ndlp->nlp_portname;
+	phba  = vport->phba;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		fcf_inuse = lpfc_fcf_inuse(phba);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport devlosstmo:did:x%x type:x%x id:x%x",
+		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
+
+	/*
+	 * If lpfc_nlp_remove is reached with a dangling rport, it drops
+	 * the reference. To make sure that does not happen, clear the
+	 * rport pointer in the ndlp before calling lpfc_nlp_put.
+	 */
+	rdata = rport->dd_data;
+
+	/* Don't defer this if we are in the process of deleting the vport
+	 * or unloading the driver. The unload will clean up the node
+	 * appropriately; we just need to clean up the ndlp rport info here.
+	 */
+	if (vport->load_flag & FC_UNLOADING) {
+		if (ndlp->nlp_sid != NLP_NO_SID) {
+			/* flush the target */
+			lpfc_sli_abort_iocb(vport,
+					    &phba->sli.sli3_ring[LPFC_FCP_RING],
+					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+		}
+		put_node = rdata->pnode != NULL;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		if (put_node)
+			lpfc_nlp_put(ndlp);
+		put_device(&rport->dev);
+
+		return fcf_inuse;
+	}
+
+	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0284 Devloss timeout Ignored on "
+				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+				 "NPort x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID);
+		return fcf_inuse;
+	}
+
+	put_node = rdata->pnode != NULL;
+	rdata->pnode = NULL;
+	ndlp->rport = NULL;
+	if (put_node)
+		lpfc_nlp_put(ndlp);
+	put_device(&rport->dev);
+
+	if (ndlp->nlp_type & NLP_FABRIC)
+		return fcf_inuse;
+
+	if (ndlp->nlp_sid != NLP_NO_SID) {
+		warn_on = 1;
+		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
+				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+	}
+
+	if (warn_on) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0203 Devloss timeout on "
+				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 "NPort x%06x Data: x%x x%x x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, ndlp->nlp_rpi);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0204 Devloss timeout on "
+				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 "NPort x%06x Data: x%x x%x x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, ndlp->nlp_rpi);
+	}
+
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
+	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+	return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifier with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking the devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for an SLI4 host. When this routine
+ * is invoked for the devloss timeout of the last remote node that had been
+ * using the FCF, it is guaranteed that no remote node is still using the
+ * FCF. In that case, if the FIP engine is neither in the FCF table scan
+ * process nor in the roundrobin failover process, the in-use FCF is
+ * unregistered. If the FIP engine is in the FCF discovery process, the
+ * devloss timeout state is set so that either the FCF table scan process
+ * or the roundrobin failover process unregisters the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+				    uint32_t nlp_did)
+{
+	/* If devloss timeout happened to a remote node when FCF had no
+	 * longer been in-use, do nothing.
+	 */
+	if (!fcf_inuse)
+		return;
+
+	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			phba->hba_flag |= HBA_DEVLOSS_TMO;
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2847 Last remote node (x%x) using "
+					"FCF devloss tmo\n", nlp_did);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2868 Devloss tmo to FCF rediscovery "
+					"in progress\n");
+			return;
+		}
+		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2869 Devloss tmo to idle FIP engine, "
+					"unreg in-use FCF and rescan.\n");
+			/* Unregister in-use FCF and rescan */
+			lpfc_unregister_fcf_rescan(phba);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2870 FCF table scan in progress\n");
+		if (phba->hba_flag & FCF_RR_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2871 FLOGI roundrobin FCF failover "
+					"in progress\n");
+	}
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_alloc_fast_evt - Allocates data structure for posting event
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the functions which need to post
+ * events from interrupt context. This function allocates the data
+ * structure required for posting an event. It also keeps track of
+ * the number of pending events and prevents an event storm when
+ * there are too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+	struct lpfc_fast_path_event *ret;
+
+	/* If there are a lot of fast events, do not exhaust memory on them */
+	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+		return NULL;
+
+	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+			GFP_ATOMIC);
+	if (ret) {
+		atomic_inc(&phba->fast_event_count);
+		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	}
+	return ret;
+}
+
+/**
+ * lpfc_free_fast_evt - Frees event data structure
+ * @phba: Pointer to hba context object.
+ * @evt:  Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+		struct lpfc_fast_path_event *evt) {
+
+	atomic_dec(&phba->fast_event_count);
+	kfree(evt);
+}
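+
+/*
+ * Minimal usage sketch (illustration only, not driver code): an
+ * interrupt-context producer allocates an event, fills one of the union
+ * members consumed by lpfc_send_fastpath_evt() below, queues the embedded
+ * work_evt on the hba work list, and wakes the worker thread:
+ *
+ *	struct lpfc_fast_path_event *evt = lpfc_alloc_fast_evt(phba);
+ *
+ *	if (evt) {
+ *		evt->vport = vport;
+ *		evt->un.scsi_evt.event_type = FC_REG_SCSI_EVENT;
+ *		evt->un.scsi_evt.subcategory = LPFC_EVENT_QFULL;
+ *		spin_lock_irqsave(&phba->hbalock, flags);
+ *		list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
+ *		spin_unlock_irqrestore(&phba->hbalock, flags);
+ *		lpfc_worker_wake_up(phba);
+ *	}
+ */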
+
+/**
+ * lpfc_send_fastpath_evt - Posts events generated from fast path
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from the worker thread when the interrupt
+ * context needs to post an event. It posts the event to the FC
+ * transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+		struct lpfc_work_evt *evtp)
+{
+	unsigned long evt_category, evt_sub_category;
+	struct lpfc_fast_path_event *fast_evt_data;
+	char *evt_data;
+	uint32_t evt_data_size;
+	struct Scsi_Host *shost;
+
+	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+		work_evt);
+
+	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+	evt_sub_category = (unsigned long) fast_evt_data->un.
+			fabric_evt.subcategory;
+	shost = lpfc_shost_from_vport(fast_evt_data->vport);
+	if (evt_category == FC_REG_FABRIC_EVENT) {
+		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+			evt_data = (char *) &fast_evt_data->un.read_check_error;
+			evt_data_size = sizeof(fast_evt_data->un.
+				read_check_error);
+		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+			evt_data = (char *) &fast_evt_data->un.fabric_evt;
+			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+		} else {
+			lpfc_free_fast_evt(phba, fast_evt_data);
+			return;
+		}
+	} else if (evt_category == FC_REG_SCSI_EVENT) {
+		switch (evt_sub_category) {
+		case LPFC_EVENT_QFULL:
+		case LPFC_EVENT_DEVBSY:
+			evt_data = (char *) &fast_evt_data->un.scsi_evt;
+			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+			break;
+		case LPFC_EVENT_CHECK_COND:
+			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+			evt_data_size =  sizeof(fast_evt_data->un.
+				check_cond_evt);
+			break;
+		case LPFC_EVENT_VARQUEDEPTH:
+			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+			evt_data_size = sizeof(fast_evt_data->un.
+				queue_depth_evt);
+			break;
+		default:
+			lpfc_free_fast_evt(phba, fast_evt_data);
+			return;
+		}
+	} else {
+		lpfc_free_fast_evt(phba, fast_evt_data);
+		return;
+	}
+
+	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			evt_data_size,
+			evt_data,
+			LPFC_NL_VENDOR_ID);
+
+	lpfc_free_fast_evt(phba, fast_evt_data);
+	return;
+}
+
+static void
+lpfc_work_list_done(struct lpfc_hba *phba)
+{
+	struct lpfc_work_evt  *evtp = NULL;
+	struct lpfc_nodelist  *ndlp;
+	int free_evt;
+	int fcf_inuse;
+	uint32_t nlp_did;
+
+	spin_lock_irq(&phba->hbalock);
+	while (!list_empty(&phba->work_list)) {
+		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
+				 evt_listp);
+		spin_unlock_irq(&phba->hbalock);
+		free_evt = 1;
+		switch (evtp->evt) {
+		case LPFC_EVT_ELS_RETRY:
+			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
+			lpfc_els_retry_delay_handler(ndlp);
+			free_evt = 0; /* evt is part of ndlp */
+			/* decrement the node reference count held
+			 * for this queued work
+			 */
+			lpfc_nlp_put(ndlp);
+			break;
+		case LPFC_EVT_DEV_LOSS:
+			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
+			free_evt = 0;
+			/* decrement the node reference count held for
+			 * this queued work
+			 */
+			nlp_did = ndlp->nlp_DID;
+			lpfc_nlp_put(ndlp);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_post_dev_loss_tmo_handler(phba,
+								    fcf_inuse,
+								    nlp_did);
+			break;
+		case LPFC_EVT_ONLINE:
+			if (phba->link_state < LPFC_LINK_DOWN)
+				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
+			else
+				*(int *) (evtp->evt_arg1) = 0;
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_OFFLINE_PREP:
+			if (phba->link_state >= LPFC_LINK_DOWN)
+				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+			*(int *)(evtp->evt_arg1) = 0;
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_OFFLINE:
+			lpfc_offline(phba);
+			lpfc_sli_brdrestart(phba);
+			*(int *)(evtp->evt_arg1) =
+				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
+			lpfc_unblock_mgmt_io(phba);
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_WARM_START:
+			lpfc_offline(phba);
+			lpfc_reset_barrier(phba);
+			lpfc_sli_brdreset(phba);
+			lpfc_hba_down_post(phba);
+			*(int *)(evtp->evt_arg1) =
+				lpfc_sli_brdready(phba, HS_MBRDY);
+			lpfc_unblock_mgmt_io(phba);
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_KILL:
+			lpfc_offline(phba);
+			*(int *)(evtp->evt_arg1)
+				= (phba->pport->stopped)
+				        ? 0 : lpfc_sli_brdkill(phba);
+			lpfc_unblock_mgmt_io(phba);
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_FASTPATH_MGMT_EVT:
+			lpfc_send_fastpath_evt(phba, evtp);
+			free_evt = 0;
+			break;
+		case LPFC_EVT_RESET_HBA:
+			if (!(phba->pport->load_flag & FC_UNLOADING))
+				lpfc_reset_hba(phba);
+			break;
+		}
+		if (free_evt)
+			kfree(evtp);
+		spin_lock_irq(&phba->hbalock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+}
+
+static void
+lpfc_work_done(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring *pring;
+	uint32_t ha_copy, status, control, work_port_events;
+	struct lpfc_vport **vports;
+	struct lpfc_vport *vport;
+	int i;
+
+	spin_lock_irq(&phba->hbalock);
+	ha_copy = phba->work_ha;
+	phba->work_ha = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* First, try to post the next mailbox command to SLI4 device */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		lpfc_sli4_post_async_mbox(phba);
+
+	if (ha_copy & HA_ERATT)
+		/* Handle the error attention event */
+		lpfc_handle_eratt(phba);
+
+	if (ha_copy & HA_MBATT)
+		lpfc_sli_handle_mb_event(phba);
+
+	if (ha_copy & HA_LATT)
+		lpfc_handle_latt(phba);
+
+	/* Process SLI4 events */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		if (phba->hba_flag & HBA_RRQ_ACTIVE)
+			lpfc_handle_rrq_active(phba);
+		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
+			lpfc_sli4_fcp_xri_abort_event_proc(phba);
+		if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+			lpfc_sli4_nvme_xri_abort_event_proc(phba);
+		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+			lpfc_sli4_els_xri_abort_event_proc(phba);
+		if (phba->hba_flag & ASYNC_EVENT)
+			lpfc_sli4_async_event_proc(phba);
+		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
+			lpfc_sli4_fcf_redisc_event_proc(phba);
+	}
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports; i++) {
+			/*
+			 * We could have no vports in array if unloading, so if
+			 * this happens then just use the pport
+			 */
+			if (vports[i] == NULL && i == 0)
+				vport = phba->pport;
+			else
+				vport = vports[i];
+			if (vport == NULL)
+				break;
+			spin_lock_irq(&vport->work_port_lock);
+			work_port_events = vport->work_port_events;
+			vport->work_port_events &= ~work_port_events;
+			spin_unlock_irq(&vport->work_port_lock);
+			if (work_port_events & WORKER_DISC_TMO)
+				lpfc_disc_timeout_handler(vport);
+			if (work_port_events & WORKER_ELS_TMO)
+				lpfc_els_timeout_handler(vport);
+			if (work_port_events & WORKER_HB_TMO)
+				lpfc_hb_timeout_handler(phba);
+			if (work_port_events & WORKER_MBOX_TMO)
+				lpfc_mbox_timeout_handler(phba);
+			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+				lpfc_unblock_fabric_iocbs(phba);
+			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+				lpfc_ramp_down_queue_handler(phba);
+			if (work_port_events & WORKER_DELAYED_DISC_TMO)
+				lpfc_delayed_disc_timeout_handler(vport);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	pring = lpfc_phba_elsring(phba);
+	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
+	status >>= (4*LPFC_ELS_RING);
+	if (pring && (status & HA_RXMASK ||
+		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
+		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
+			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Preserve legacy behavior. */
+			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
+		} else {
+			if (phba->link_state >= LPFC_LINK_UP ||
+			    phba->link_flag & LS_MDS_LOOPBACK) {
+				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+				lpfc_sli_handle_slow_ring_event(phba, pring,
+								(status &
+								HA_RXMASK));
+			}
+		}
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
+				 (!list_empty(&pring->txq)))
+			lpfc_drain_txq(phba);
+		/*
+		 * Turn on Ring interrupts
+		 */
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			spin_lock_irq(&phba->hbalock);
+			control = readl(phba->HCregaddr);
+			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+				lpfc_debugfs_slow_ring_trc(phba,
+					"WRK Enable ring: cntl:x%x hacopy:x%x",
+					control, ha_copy, 0);
+
+				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+				writel(control, phba->HCregaddr);
+				readl(phba->HCregaddr); /* flush */
+			} else {
+				lpfc_debugfs_slow_ring_trc(phba,
+					"WRK Ring ok:     cntl:x%x hacopy:x%x",
+					control, ha_copy, 0);
+			}
+			spin_unlock_irq(&phba->hbalock);
+		}
+	}
+	lpfc_work_list_done(phba);
+}
+
+int
+lpfc_do_work(void *p)
+{
+	struct lpfc_hba *phba = p;
+	int rc;
+
+	set_user_nice(current, MIN_NICE);
+	current->flags |= PF_NOFREEZE;
+	phba->data_flags = 0;
+
+	while (!kthread_should_stop()) {
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
+		/* Signal wakeup shall terminate the worker thread */
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					"0433 Wakeup on signal: rc=x%x\n", rc);
+			break;
+		}
+
+		/* Attend pending lpfc data processing */
+		lpfc_work_done(phba);
+	}
+	phba->worker_thread = NULL;
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0432 Worker thread stopped.\n");
+	return 0;
+}
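+
+/*
+ * Producer-side sketch (an assumption about lpfc_worker_wake_up(), which is
+ * defined elsewhere): waking the loop above amounts to setting the
+ * LPFC_DATA_READY bit and waking the worker wait queue, roughly:
+ *
+ *	set_bit(LPFC_DATA_READY, &phba->data_flags);
+ *	wake_up(&phba->work_waitq);
+ */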
+
+/*
+ * This is only called to handle FC worker events. Since this is a rare
+ * occurrence, we allocate a struct lpfc_work_evt structure here instead of
+ * embedding it in the IOCB.
+ */
+int
+lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
+		      uint32_t evt)
+{
+	struct lpfc_work_evt  *evtp;
+	unsigned long flags;
+
+	/*
+	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
+	 * be queued to the worker thread for processing.
+	 */
+	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
+	if (!evtp)
+		return 0;
+
+	evtp->evt_arg1  = arg1;
+	evtp->evt_arg2  = arg2;
+	evtp->evt       = evt;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&evtp->evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	lpfc_worker_wake_up(phba);
+
+	return 1;
+}
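+
+/*
+ * Usage sketch (illustration only): blocking callers of events such as
+ * LPFC_EVT_ONLINE pass a status word as arg1 and a completion as arg2,
+ * matching how lpfc_work_list_done() completes them above:
+ *
+ *	struct completion online_compl;
+ *	int status = 0;
+ *
+ *	init_completion(&online_compl);
+ *	if (lpfc_workq_post_event(phba, &status, &online_compl,
+ *				  LPFC_EVT_ONLINE))
+ *		wait_for_completion(&online_compl);
+ */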
+
+void
+lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+			((vport->port_type == LPFC_NPIV_PORT) &&
+			(ndlp->nlp_DID == NameServer_DID)))
+			lpfc_unreg_rpi(vport, ndlp);
+
+		/* Leave Fabric nodes alone on link down */
+		if ((phba->sli_rev < LPFC_SLI_REV4) &&
+		    (!remove && ndlp->nlp_type & NLP_FABRIC))
+			continue;
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					remove
+					? NLP_EVT_DEVICE_RM
+					: NLP_EVT_DEVICE_RECOVERY);
+	}
+	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+		lpfc_mbx_unreg_vpi(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
+	}
+}
+
+void
+lpfc_port_link_failure(struct lpfc_vport *vport)
+{
+	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+
+	/* Cleanup any outstanding received buffers */
+	lpfc_cleanup_rcv_buffers(vport);
+
+	/* Cleanup any outstanding RSCN activity */
+	lpfc_els_flush_rscn(vport);
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_cmd(vport);
+
+	lpfc_cleanup_rpis(vport, 0);
+
+	/* Turn off the discovery timer if it's running */
+	lpfc_can_disctmo(vport);
+}
+
+void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+		fc_host_post_event(shost, fc_get_event_number(),
+				   FCH_EVT_LINKDOWN, 0);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Link Down:       state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	lpfc_port_link_failure(vport);
+
+	/* Stop delayed Nport discovery */
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_DISC_DELAYED;
+	spin_unlock_irq(shost->host_lock);
+	del_timer_sync(&vport->delayed_disc_tmo);
+}
+
+int
+lpfc_linkdown(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_vport **vports;
+	LPFC_MBOXQ_t          *mb;
+	int i;
+
+	if (phba->link_state == LPFC_LINK_DOWN)
+		return 0;
+
+	/* Block all SCSI stack I/Os */
+	lpfc_scsi_dev_block(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	spin_unlock_irq(&phba->hbalock);
+	if (phba->link_state > LPFC_LINK_DOWN) {
+		phba->link_state = LPFC_LINK_DOWN;
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag &= ~FC_LBIT;
+		spin_unlock_irq(shost->host_lock);
+	}
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			/* Issue a LINK DOWN event to all nodes */
+			lpfc_linkdown_port(vports[i]);
+
+			vports[i]->fc_myDID = 0;
+
+			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+				if (phba->nvmet_support)
+					lpfc_nvmet_update_targetport(phba);
+				else
+					lpfc_nvme_update_localport(vports[i]);
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Clean up any SLI3 firmware default rpi's */
+	if (phba->sli_rev > LPFC_SLI_REV3)
+		goto skip_unreg_did;
+
+	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mb) {
+		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
+		mb->vport = vport;
+		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+		    == MBX_NOT_FINISHED) {
+			mempool_free(mb, phba->mbox_mem_pool);
+		}
+	}
+
+ skip_unreg_did:
+	/* Set up myDID for link up if we are in pt2pt mode */
+	if (phba->pport->fc_flag & FC_PT2PT) {
+		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mb) {
+			lpfc_config_link(phba, mb);
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mb->vport = vport;
+			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+			    == MBX_NOT_FINISHED) {
+				mempool_free(mb, phba->mbox_mem_pool);
+			}
+		}
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+		spin_unlock_irq(shost->host_lock);
+	}
+	return 0;
+}
+
+static void
+lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		if (ndlp->nlp_type & NLP_FABRIC) {
+			/* On link up it's safe to clean up the ndlp
+			 * from Fabric connections.
+			 */
+			if (ndlp->nlp_DID != Fabric_DID)
+				lpfc_unreg_rpi(vport, ndlp);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+			/* Fail outstanding IO now since device is
+			 * marked for PLOGI.
+			 */
+			lpfc_unreg_rpi(vport, ndlp);
+		}
+	}
+}
+
+static void
+lpfc_linkup_port(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	if ((vport->load_flag & FC_UNLOADING) != 0)
+		return;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Link Up:         top:x%x speed:x%x flg:x%x",
+		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
+
+	/* If NPIV is not enabled, only bring the physical port up */
+	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+		(vport != phba->pport))
+		return;
+
+	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+		fc_host_post_event(shost, fc_get_event_number(),
+				   FCH_EVT_LINKUP, 0);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+	vport->fc_flag |= FC_NDISC_ACTIVE;
+	vport->fc_ns_retry = 0;
+	spin_unlock_irq(shost->host_lock);
+
+	if (vport->fc_flag & FC_LBIT)
+		lpfc_linkup_cleanup_nodes(vport);
+
+}
+
+static int
+lpfc_linkup(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	phba->link_state = LPFC_LINK_UP;
+
+	/* Unblock fabric iocbs if they are blocked */
+	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+	del_timer_sync(&phba->fabric_block_timer);
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_linkup_port(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	return 0;
+}
+
+/*
+ * This routine handles processing a CLEAR_LA mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer. SLI3 only.
+ */
+static void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_sli   *psli = &phba->sli;
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint32_t control;
+
+	/* Since we don't do discovery right now, turn these off here */
+	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+
+	/* Check for error */
+	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
+		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0320 CLEAR_LA mbxStatus error x%x hba "
+				 "state x%x\n",
+				 mb->mbxStatus, vport->port_state);
+		phba->link_state = LPFC_HBA_ERROR;
+		goto out;
+	}
+
+	if (vport->port_type == LPFC_PHYSICAL_PORT)
+		phba->link_state = LPFC_HBA_READY;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	control = readl(phba->HCregaddr);
+	control |= HC_LAINT_ENA;
+	writel(control, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+
+out:
+	/* Device Discovery completes */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0225 Device Discovery completes\n");
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_can_disctmo(vport);
+
+	/* turn on Link Attention interrupts */
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	control = readl(phba->HCregaddr);
+	control |= HC_LAINT_ENA;
+	writel(control, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+
+void
+lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	uint8_t bbscn = 0;
+
+	if (pmb->u.mb.mbxStatus)
+		goto out;
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* don't perform discovery for SLI4 loopback diagnostic test */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    !(phba->hba_flag & HBA_FCOE_MODE) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE))
+		return;
+
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+	    vport->fc_flag & FC_PUBLIC_LOOP &&
+	    !(vport->fc_flag & FC_LBIT)) {
+			/* Need to wait for FAN - use discovery timer
+			 * for timeout.  port_state is identically
+			 * LPFC_LOCAL_CFG_LINK while waiting for FAN
+			 */
+			lpfc_set_disctmo(vport);
+			return;
+	}
+
+	/* Start discovery by sending a FLOGI. port_state is identically
+	 * LPFC_FLOGI while waiting for FLOGI cmpl
+	 */
+	if (vport->port_state != LPFC_FLOGI) {
+		if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+			bbscn = bf_get(lpfc_bbscn_def,
+				       &phba->sli4_hba.bbscn_params);
+			vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
+			vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
+		}
+		lpfc_initial_flogi(vport);
+	} else if (vport->fc_flag & FC_PT2PT) {
+		lpfc_disc_start(vport);
+	}
+	return;
+
+out:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "0306 CONFIG_LINK mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 pmb->u.mb.mbxStatus, vport->port_state);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	lpfc_linkdown(phba);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0200 CONFIG_LINK bad hba state x%x\n",
+			 vport->port_state);
+
+	lpfc_issue_clear_la(phba, vport);
+	return;
+}
+
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask - Reset the round-robin FCF bitmask
+ * @phba: pointer to the struct lpfc_hba for this port.
+ *
+ * This function resets the round-robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
+ * from the lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *next_fcf_pri;
+	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+				&phba->fcf.fcf_pri_list, list) {
+		list_del_init(&fcf_pri->list);
+		fcf_pri->fcf_rec.flag = 0;
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "2017 REG_FCFI mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 mboxq->u.mb.mbxStatus, vport->port_state);
+		goto fail_out;
+	}
+
+	/* Start FCoE discovery by sending a FLOGI. */
+	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+	/* Set the FCFI registered flag */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= FCF_REGISTERED;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* If there is a pending FCoE event, restart FCF table scan. */
+	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+		goto fail_out;
+
+	/* Mark successful completion of FCF table scan */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_TS_INPROG;
+	if (vport->port_state != LPFC_FLOGI) {
+		phba->hba_flag |= FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_issue_init_vfi(vport);
+		goto out;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto out;
+
+fail_out:
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCF_RR_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+out:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric names match.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's fabric name with the provided
+ * fabric name. If the fabric names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
+{
+	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
+		return 0;
+	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
+		return 0;
+	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
+		return 0;
+	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
+		return 0;
+	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
+		return 0;
+	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
+		return 0;
+	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
+		return 0;
+	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+/**
+ * lpfc_sw_name_match - Check if the fcf switch names match.
+ * @sw_name: pointer to switch name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's switch name with the provided
+ * switch name. If the switch names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
+{
+	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
+		return 0;
+	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
+		return 0;
+	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
+		return 0;
+	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
+		return 0;
+	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
+		return 0;
+	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
+		return 0;
+	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
+		return 0;
+	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+/**
+ * lpfc_mac_addr_match - Check if the fcf mac addresses match.
+ * @mac_addr: pointer to mac address.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's mac address with the HBA's
+ * FCF mac address. If the mac addresses are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
+{
+	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
+		return 0;
+	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
+		return 0;
+	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
+		return 0;
+	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
+		return 0;
+	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
+		return 0;
+	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+static bool
+lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
+{
+	return (curr_vlan_id == new_vlan_id);
+}
+
+/**
+ * __lpfc_update_fcf_record_pri - Update the lpfc_fcf_pri record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: Index for the lpfc_fcf_record.
+ * @new_fcf_record: pointer to hba fcf record.
+ *
+ * This routine updates the driver FCF priority record from the new HBA FCF
+ * record. This routine is called with the hbalock held.
+ **/
+static void
+__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
+				 struct fcf_record *new_fcf_record
+				 )
+{
+	struct lpfc_fcf_pri *fcf_pri;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	fcf_pri->fcf_rec.fcf_index = fcf_index;
+	/* FCF record priority */
+	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+
+}
+
+/**
+ * lpfc_copy_fcf_record - Copy fcf information to the driver fcf record.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine copies the FCF information from the new HBA FCF
+ * record to the driver FCF record (@fcf_rec).
+ **/
+static void
+lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
+		     struct fcf_record *new_fcf_record)
+{
+	/* Fabric name */
+	fcf_rec->fabric_name[0] =
+		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
+	fcf_rec->fabric_name[1] =
+		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
+	fcf_rec->fabric_name[2] =
+		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
+	fcf_rec->fabric_name[3] =
+		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
+	fcf_rec->fabric_name[4] =
+		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
+	fcf_rec->fabric_name[5] =
+		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
+	fcf_rec->fabric_name[6] =
+		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
+	fcf_rec->fabric_name[7] =
+		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
+	/* Mac address */
+	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+	/* FCF record index */
+	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	/* FCF record priority */
+	fcf_rec->priority = new_fcf_record->fip_priority;
+	/* Switch name */
+	fcf_rec->switch_name[0] =
+		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
+	fcf_rec->switch_name[1] =
+		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
+	fcf_rec->switch_name[2] =
+		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
+	fcf_rec->switch_name[3] =
+		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
+	fcf_rec->switch_name[4] =
+		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
+	fcf_rec->switch_name[5] =
+		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
+	fcf_rec->switch_name[6] =
+		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
+	fcf_rec->switch_name[7] =
+		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
+}
+
+/**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to hba fcf record.
+ * @addr_mode: address mode to be set to the driver fcf record.
+ * @vlan_id: vlan tag to be set to the driver fcf record.
+ * @flag: flag bits to be set to the driver fcf record.
+ *
+ * This routine updates the driver FCF record from the new HBA FCF record
+ * together with the address mode, vlan_id, and other information. This
+ * routine is called with the hbalock held.
+ **/
+static void
+__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
+		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
+		       uint16_t vlan_id, uint32_t flag)
+{
+	lockdep_assert_held(&phba->hbalock);
+
+	/* Copy the fields from the HBA's FCF record */
+	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
+	/* Update other fields of driver FCF record */
+	fcf_rec->addr_mode = addr_mode;
+	fcf_rec->vlan_id = vlan_id;
+	fcf_rec->flag |= (flag | RECORD_VALID);
+	__lpfc_update_fcf_record_pri(phba,
+		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
+				 new_fcf_record);
+}
+
+/**
+ * lpfc_register_fcf - Register the FCF with hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a register fcfi mailbox command to register
+ * the fcf with HBA.
+ **/
+static void
+lpfc_register_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *fcf_mbxq;
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* If the FCF is not available do nothing. */
+	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* The FCF is already registered, start discovery */
+	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
+		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		if (phba->pport->port_state != LPFC_FLOGI &&
+		    phba->pport->fc_flag & FC_FABRIC) {
+			phba->hba_flag |= FCF_RR_INPROG;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_initial_flogi(phba->pport);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!fcf_mbxq) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	lpfc_reg_fcfi(phba, fcf_mbxq);
+	fcf_mbxq->vport = phba->pport;
+	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
+	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		spin_unlock_irq(&phba->hbalock);
+		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ * @boot_flag: Indicates if this record is used by the boot bios.
+ * @addr_mode: The address mode to be used by this FCF
+ * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
+ *
+ * This routine compares the fcf record with the connect list obtained from
+ * the config region to decide if this FCF can be used for SAN discovery. It
+ * returns 1 if this record can be used for SAN discovery, else it returns
+ * zero. If this FCF record can be used for SAN discovery, the boot_flag will
+ * indicate if this FCF is used by the boot bios and addr_mode will indicate
+ * the addressing mode to be used for this FCF when the function returns.
+ * If the FCF record needs to be used with a particular vlan id, the vlan is
+ * set in the vlan_id on return of the function. If no VLAN tagging needs to
+ * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
+ **/
+static int
+lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
+			struct fcf_record *new_fcf_record,
+			uint32_t *boot_flag, uint32_t *addr_mode,
+			uint16_t *vlan_id)
+{
+	struct lpfc_fcf_conn_entry *conn_entry;
+	int i, j, fcf_vlan_id = 0;
+
+	/* Find the lowest VLAN id in the FCF record */
+	for (i = 0; i < 512; i++) {
+		if (new_fcf_record->vlan_bitmap[i]) {
+			fcf_vlan_id = i * 8;
+			j = 0;
+			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+				j++;
+				fcf_vlan_id++;
+			}
+			break;
+		}
+	}
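+	/*
+	 * Example: if vlan_bitmap[0] == 0 and vlan_bitmap[1] == 0x04, the
+	 * scan above stops at i = 1, j = 2, giving fcf_vlan_id = 8 + 2 = 10
+	 * (bit 0 of each byte maps to the lowest vlan id in that byte).
+	 */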
+
+	/* FCF not valid/available or solicitation in progress */
+	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
+	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
+	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
+		return 0;
+
+	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+		*boot_flag = 0;
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record);
+		if (phba->valid_vlan)
+			*vlan_id = phba->vlan_id;
+		else
+			*vlan_id = LPFC_FCOE_NULL_VID;
+		return 1;
+	}
+
+	/*
+	 * If there is no FCF connection table entry, the driver connects to all
+	 * FCFs.
+	 */
+	if (list_empty(&phba->fcf_conn_rec_list)) {
+		*boot_flag = 0;
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+			new_fcf_record);
+
+		/*
+		 * When there are no FCF connect entries, use driver's default
+		 * addressing mode - FPMA.
+		 */
+		if (*addr_mode & LPFC_FCF_FPMA)
+			*addr_mode = LPFC_FCF_FPMA;
+
+		/* If the FCF record reports a vlan id, use that vlan id */
+		if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = LPFC_FCOE_NULL_VID;
+		return 1;
+	}
+
+	list_for_each_entry(conn_entry,
+			    &phba->fcf_conn_rec_list, list) {
+		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
+			continue;
+
+		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
+			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
+					     new_fcf_record))
+			continue;
+		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
+			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
+					    new_fcf_record))
+			continue;
+		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
+			/*
+			 * If the vlan bit map does not have the bit set for the
+			 * vlan id to be used, then it is not a match.
+			 */
+			if (!(new_fcf_record->vlan_bitmap
+				[conn_entry->conn_rec.vlan_tag / 8] &
+				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
+				continue;
+		}
+
+		/*
+		 * If connection record does not support any addressing mode,
+		 * skip the FCF record.
+		 */
+		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
+			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
+			continue;
+
+		/*
+		 * Check if the connection record specifies a required
+		 * addressing mode.
+		 */
+		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
+
+			/*
+			 * If SPMA is required but the FCF does not support it, continue.
+			 */
+			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+				!(bf_get(lpfc_fcf_record_mac_addr_prov,
+					new_fcf_record) & LPFC_FCF_SPMA))
+				continue;
+
+			/*
+			 * If FPMA is required but the FCF does not support it, continue.
+			 */
+			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+				!(bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record) & LPFC_FCF_FPMA))
+				continue;
+		}
+
+		/*
+		 * This fcf record matches the filtering criteria.
+		 */
+		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+			*boot_flag = 1;
+		else
+			*boot_flag = 0;
+
+		/*
+		 * If the user did not specify any addressing mode, or if the
+		 * preferred addressing mode specified by the user is not
+		 * supported by the FCF, allow the fabric to pick the
+		 * addressing mode.
+		 */
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record);
+		/*
+		 * If the user specified a required address mode, assign that
+		 * address mode
+		 */
+		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+			*addr_mode = (conn_entry->conn_rec.flags &
+				FCFCNCT_AM_SPMA) ?
+				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+		/*
+		 * If the user specified a preferred address mode, use the
+		 * addr mode only if the FCF supports the addr_mode.
+		 */
+		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+			(*addr_mode & LPFC_FCF_SPMA))
+				*addr_mode = LPFC_FCF_SPMA;
+		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+			(*addr_mode & LPFC_FCF_FPMA))
+				*addr_mode = LPFC_FCF_FPMA;
+
+		/* If matching connect list has a vlan id, use it */
+		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		/*
+		 * If no vlan id is specified in the connect list, use the vlan id
+		 * in the FCF record
+		 */
+		else if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = LPFC_FCOE_NULL_VID;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
+ * @phba: pointer to lpfc hba data structure.
+ * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
+ *
+ * This function checks if there is any fcoe event pending while the driver
+ * scans FCF entries. If there is any pending event, it will restart the
+ * FCF scanning and return 1, else it returns 0.
+ */
+int
+lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
+{
+	/*
+	 * If the link is up and no FCoE events occurred while in
+	 * FCF discovery, there is no need to restart FCF discovery.
+	 */
+	if ((phba->link_state  >= LPFC_LINK_UP) &&
+	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+		return 0;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2768 Pending link or FCF event during current "
+			"handling of the previous event: link_state:x%x, "
+			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+			phba->fcoe_eventtag);
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
+	spin_unlock_irq(&phba->hbalock);
+
+	if (phba->link_state >= LPFC_LINK_UP) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2780 Restart FCF table scan due to "
+				"pending FCF event:evt_tag_at_scan:x%x, "
+				"evt_tag_current:x%x\n",
+				phba->fcoe_eventtag_at_fcf_scan,
+				phba->fcoe_eventtag);
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	} else {
+		/*
+		 * Do not continue FCF discovery and clear FCF_TS_INPROG
+		 * flag
+		 */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2833 Stop FCF discovery process due to link "
+				"state change (x%x)\n", phba->link_state);
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Unregister the currently registered FCF if required */
+	if (unreg_fcf) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_unregister_fcf(phba);
+	}
+	return 1;
+}
+
+/**
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf record seen so far.
+ *
+ * This function makes a running random selection decision on which FCF
+ * record to use through a sequence of @fcf_cnt eligible FCF records with
+ * equal probability. To perform integer manipulation of random numbers with
+ * size uint32_t, the lower 16 bits of the 32-bit random number returned
+ * from prandom_u32() are taken as the random number generated.
+ *
+ * Returns true when the outcome is that the newly read FCF record should be
+ * chosen; otherwise, returns false when the outcome is to keep the previously
+ * chosen FCF record.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+	uint32_t rand_num;
+
+	/* Get 16-bit uniform random number */
+	rand_num = 0xFFFF & prandom_u32();
+
+	/* Decision with probability 1/fcf_cnt */
+	if ((fcf_cnt * rand_num) < 0xFFFF)
+		return true;
+	else
+		return false;
+}
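+
+/*
+ * Worked example for the selection above: with fcf_cnt == 4, the new record
+ * is kept only when (4 * rand_num) < 0xFFFF, i.e. rand_num <= 0x3FFF, which
+ * happens with probability ~1/4.  Keeping the k-th record with probability
+ * 1/k over the whole sequence yields a uniform choice among all eligible
+ * records seen so far (reservoir sampling with a reservoir of one).
+ */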
+
+/**
+ * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
+ *
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessary error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successful, otherwise NULL.
+ */
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+			     uint16_t *next_fcf_index)
+{
+	void *virt_addr;
+	struct lpfc_mbx_sge sge;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+	uint32_t shdr_status, shdr_add_status, if_type;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct fcf_record *new_fcf_record;
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	if (unlikely(!mboxq->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2524 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		return NULL;
+	}
+	virt_addr = mboxq->sge_array->addr[0];
+
+	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+	lpfc_sli_pcimem_bcopy(shdr, shdr,
+			      sizeof(union lpfc_sli4_cfg_shdr));
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status) {
+		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
+					if_type == LPFC_SLI_INTF_IF_TYPE_2)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2726 READ_FCF_RECORD Indicates empty "
+					"FCF table.\n");
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2521 READ_FCF_RECORD mailbox failed "
+					"with status x%x add_status x%x, "
+					"mbx\n", shdr_status, shdr_add_status);
+		return NULL;
+	}
+
+	/* Interpreting the returned information of the FCF record */
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
+			      sizeof(struct lpfc_mbx_read_fcf_tbl));
+	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
+	new_fcf_record = (struct fcf_record *)(virt_addr +
+			  sizeof(struct lpfc_mbx_read_fcf_tbl));
+	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
+				offsetof(struct fcf_record, vlan_bitmap));
+	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
+
+	return new_fcf_record;
+}
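+
+/*
+ * Note (illustrative, not driver code): the src == dst calls to
+ * lpfc_sli_pcimem_bcopy() above are intentional; the helper swaps each
+ * word as it copies, so copying a buffer onto itself converts it from
+ * wire order to CPU order in place. Assuming that behavior, the idiom
+ * reduces to a loop along these lines:
+ *
+ *	static void words_to_cpu(u32 *buf, size_t nwords)
+ *	{
+ *		size_t i;
+ *
+ *		for (i = 0; i < nwords; i++)
+ *			buf[i] = le32_to_cpu((__force __le32)buf[i]);
+ *	}
+ */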
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if the LOG_FIP logging is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+			      struct fcf_record *fcf_record,
+			      uint16_t vlan_id,
+			      uint16_t next_fcf_index)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2764 READ_FCF_RECORD:\n"
+			"\tFCF_Index     : x%x\n"
+			"\tFCF_Avail     : x%x\n"
+			"\tFCF_Valid     : x%x\n"
+			"\tFCF_SOL       : x%x\n"
+			"\tFIP_Priority  : x%x\n"
+			"\tMAC_Provider  : x%x\n"
+			"\tLowest VLANID : x%x\n"
+			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
+			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tNext_FCF_Index: x%x\n",
+			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
+			fcf_record->fip_priority,
+			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+			vlan_id,
+			bf_get(lpfc_fcf_record_mac_0, fcf_record),
+			bf_get(lpfc_fcf_record_mac_1, fcf_record),
+			bf_get(lpfc_fcf_record_mac_2, fcf_record),
+			bf_get(lpfc_fcf_record_mac_3, fcf_record),
+			bf_get(lpfc_fcf_record_mac_4, fcf_record),
+			bf_get(lpfc_fcf_record_mac_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+			next_fcf_index);
+}
+
+/**
+ * lpfc_sli4_fcf_record_match - test new FCF record for matching existing FCF
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to an existing FCF record.
+ * @new_fcf_record: pointer to a new FCF record.
+ * @new_vlan_id: vlan id from the new FCF record.
+ *
+ * This function performs matching test of a new FCF record against an existing
+ * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
+ * will not be used as part of the FCF record matching criteria.
+ *
+ * Returns true if all the fields match, otherwise returns false.
+ */
+static bool
+lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
+			   struct lpfc_fcf_rec *fcf_rec,
+			   struct fcf_record *new_fcf_record,
+			   uint16_t new_vlan_id)
+{
+	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
+		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
+			return false;
+	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
+		return false;
+	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
+		return false;
+	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
+		return false;
+	if (fcf_rec->priority != new_fcf_record->fip_priority)
+		return false;
+	return true;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processes the roundrobin fcf failover to the next fcf index.
+ * When this function is invoked, there will be a current fcf registered
+ * for flogi.
+ * Return: 0 for continue retrying flogi on currently registered fcf;
+ *         1 for stop flogi on currently registered fcf;
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+	struct lpfc_hba *phba = vport->phba;
+	int rc;
+
+	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2872 Devloss tmo with no eligible "
+					"FCF, unregister in-use FCF (x%x) "
+					"and rescan FCF table\n",
+					phba->fcf.current_rec.fcf_indx);
+			lpfc_unregister_fcf_rescan(phba);
+			goto stop_flogi_current_fcf;
+		}
+		/* Mark the end of FLOGI roundrobin failover */
+		phba->hba_flag &= ~FCF_RR_INPROG;
+		/* Allow action to new fcf asynchronous event */
+		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2865 No FCF available, stop roundrobin FCF "
+				"failover and change port state:x%x/x%x\n",
+				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+		if (!phba->fcf.fcf_redisc_attempted) {
+			lpfc_unregister_fcf(phba);
+
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (!rc) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"3195 Rediscover FCF table\n");
+				phba->fcf.fcf_redisc_attempted = 1;
+				lpfc_sli4_clear_fcf_rr_bmask(phba);
+			} else {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+						"3196 Rediscover FCF table "
+						"failed. Status:x%x\n", rc);
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					"3197 Already rediscover FCF table "
+					"attempted. No more retry\n");
+		}
+		goto stop_flogi_current_fcf;
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+				"2794 Try FLOGI roundrobin FCF failover to "
+				"(x%x)\n", fcf_index);
+		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+		if (rc)
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2761 FLOGI roundrobin FCF failover "
+					"failed (rc:x%x) to read FCF (x%x)\n",
+					rc, phba->fcf.current_rec.fcf_indx);
+		else
+			goto stop_flogi_current_fcf;
+	}
+	return 0;
+
+stop_flogi_current_fcf:
+	lpfc_can_disctmo(vport);
+	return 1;
+}
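+
+/*
+ * Note (illustrative, not driver code): callers of the routine above are
+ * expected to branch on the return value roughly as follows, mirroring
+ * the use in lpfc_mbx_cmpl_fcf_rr_read_fcf_rec():
+ *
+ *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+ *	rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+ *	if (rc)
+ *		goto out;
+ *
+ * A nonzero return means stop FLOGI on the currently registered FCF;
+ * zero means keep retrying FLOGI on it.
+ */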
+
+/**
+ * lpfc_sli4_fcf_pri_list_del
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to delete
+ *
+ * This routine checks the on-list flag of the fcf_index to be deleted.
+ * If it is on the list, it is removed from the list and the flag is
+ * cleared. This routine grabs the hbalock before removing the fcf
+ * record from the list.
+ **/
+static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
+			uint16_t fcf_index)
+{
+	struct lpfc_fcf_pri *new_fcf_pri;
+
+	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+		"3058 deleting idx x%x pri x%x flg x%x\n",
+		fcf_index, new_fcf_pri->fcf_rec.priority,
+		new_fcf_pri->fcf_rec.flag);
+	spin_lock_irq(&phba->hbalock);
+	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
+		if (phba->fcf.current_rec.priority ==
+				new_fcf_pri->fcf_rec.priority)
+			phba->fcf.eligible_fcf_cnt--;
+		list_del_init(&new_fcf_pri->list);
+		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_set_fcf_flogi_fail
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to update
+ *
+ * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
+ * flag so that the round robin selection for the particular priority level
+ * will try a different fcf record that does not have this bit set.
+ * If the fcf record is re-read for any reason, this flag is cleared before
+ * adding it to the priority list.
+ **/
+void
+lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	struct lpfc_fcf_pri *new_fcf_pri;
+
+	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	spin_lock_irq(&phba->hbalock);
+	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_add
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to add
+ * @new_fcf_record: pointer to the new fcf record to add
+ *
+ * This routine checks the priority of the fcf_index to be added.
+ * If it is a lower priority than the current head of the fcf_pri list
+ * then it is added to the list in the right order.
+ * If it is the same priority as the current head of the list then it
+ * is added to the head of the list and its bit in the rr_bmask is set.
+ * If the fcf_index to be added is of a higher priority than the current
+ * head of the list then the rr_bmask is cleared, its bit is set in the
+ * rr_bmask and it is added to the head of the list.
+ * Returns:
+ * 0 = success, 1 = failure
+ **/
+static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
+	uint16_t fcf_index,
+	struct fcf_record *new_fcf_record)
+{
+	uint16_t current_fcf_pri;
+	uint16_t last_index;
+	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *next_fcf_pri;
+	struct lpfc_fcf_pri *new_fcf_pri;
+	int ret;
+
+	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+		"3059 adding idx x%x pri x%x flg x%x\n",
+		fcf_index, new_fcf_record->fip_priority,
+		new_fcf_pri->fcf_rec.flag);
+	spin_lock_irq(&phba->hbalock);
+	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
+		list_del_init(&new_fcf_pri->list);
+	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
+	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+	if (list_empty(&phba->fcf.fcf_pri_list)) {
+		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+		ret = lpfc_sli4_fcf_rr_index_set(phba,
+				new_fcf_pri->fcf_rec.fcf_index);
+		goto out;
+	}
+
+	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+				LPFC_SLI4_FCF_TBL_INDX_MAX);
+	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		ret = 0; /* Empty rr list */
+		goto out;
+	}
+	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
+	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
+		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
+			memset(phba->fcf.fcf_rr_bmask, 0,
+				sizeof(*phba->fcf.fcf_rr_bmask));
+			/* fcfs_at_this_priority_level = 1; */
+			phba->fcf.eligible_fcf_cnt = 1;
+		} else
+			/* fcfs_at_this_priority_level++; */
+			phba->fcf.eligible_fcf_cnt++;
+		ret = lpfc_sli4_fcf_rr_index_set(phba,
+				new_fcf_pri->fcf_rec.fcf_index);
+		goto out;
+	}
+
+	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+				&phba->fcf.fcf_pri_list, list) {
+		if (new_fcf_pri->fcf_rec.priority <=
+				fcf_pri->fcf_rec.priority) {
+			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
+				list_add(&new_fcf_pri->list,
+						&phba->fcf.fcf_pri_list);
+			else
+				list_add(&new_fcf_pri->list,
+					 &((struct lpfc_fcf_pri *)
+					fcf_pri->list.prev)->list);
+			ret = 0;
+			goto out;
+		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
+			|| new_fcf_pri->fcf_rec.priority <
+				next_fcf_pri->fcf_rec.priority) {
+			list_add(&new_fcf_pri->list, &fcf_pri->list);
+			ret = 0;
+			goto out;
+		}
+		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
+			continue;
+
+	}
+	ret = 1;
+out:
+	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
+	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
+	spin_unlock_irq(&phba->hbalock);
+	return ret;
+}
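+
+/*
+ * Note (illustrative, not driver code): a worked example of the insertion
+ * rules above, where a lower numeric value means a higher priority. With
+ * the list holding priorities {2, 2, 4} (head level 2, both pri-2 indexes
+ * set in fcf_rr_bmask, eligible_fcf_cnt == 2):
+ *
+ *	add pri 2: head insert, bit set, eligible_fcf_cnt becomes 3
+ *	add pri 1: head insert, fcf_rr_bmask wiped first, bit set,
+ *		   eligible_fcf_cnt reset to 1
+ *	add pri 3: ordered insert behind the head level; no bit set
+ */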
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery, it registers the FCF record and kick-starts
+ * discovery.
+ * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
+ * use an FCF record which matches fabric name and mac address of the
+ * currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	struct lpfc_fcf_rec *fcf_rec = NULL;
+	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
+	bool select_new_fcf;
+	int rc;
+
+	/* If there is pending FCoE event restart FCF table scan */
+	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2765 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		/* Let next new FCF event trigger fast failover */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Check the FCF record against the connection list */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	/*
+	 * If the fcf record does not match with connect list entries
+	 * read the next entry; otherwise, this is an eligible FCF
+	 * record for roundrobin FCF failover.
+	 */
+	if (!rc) {
+		lpfc_sli4_fcf_pri_list_del(phba,
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record));
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2781 FCF (x%x) failed connection "
+				"list check: (x%x/x%x/%x)\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_avail,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_valid,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_sol,
+				       new_fcf_record));
+		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
+		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+			    phba->fcf.current_rec.fcf_indx) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2862 FCF (x%x) matches property "
+					"of in-use FCF (x%x)\n",
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record),
+					phba->fcf.current_rec.fcf_indx);
+				goto read_next_fcf;
+			}
+			/*
+			 * In case the current in-use FCF record becomes
+			 * invalid/unavailable during FCF discovery that
+			 * was not triggered by fast FCF failover process,
+			 * treat it as fast FCF failover.
+			 */
+			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
+			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+						"2835 Invalid in-use FCF "
+						"(x%x), enter FCF failover "
+						"table scan.\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_sli4_mbox_cmd_free(phba, mboxq);
+				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						LPFC_FCOE_FCF_GET_FIRST);
+				return;
+			}
+		}
+		goto read_next_fcf;
+	} else {
+		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
+							new_fcf_record);
+		if (rc)
+			goto read_next_fcf;
+	}
+
+	/*
+	 * If this is not the first FCF discovery of the HBA, use last
+	 * FCF record for the discovery. A rescan matches the in-use
+	 * FCF record when the fabric name, switch name, mac address,
+	 * and vlan_id all match.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->fcf.fcf_flag & FCF_IN_USE) {
+		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+			lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+		    new_fcf_record, vlan_id)) {
+			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+			    phba->fcf.current_rec.fcf_indx) {
+				phba->fcf.fcf_flag |= FCF_AVAILABLE;
+				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+					/* Stop FCF redisc wait timer */
+					__lpfc_sli4_stop_fcf_redisc_wait_timer(
+									phba);
+				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+					/* Fast failover, mark completed */
+					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"2836 New FCF matches in-use "
+						"FCF (x%x), port_state:x%x, "
+						"fc_flag:x%x\n",
+						phba->fcf.current_rec.fcf_indx,
+						phba->pport->port_state,
+						phba->pport->fc_flag);
+				goto out;
+			} else
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2863 New FCF (x%x) matches "
+					"property of in-use FCF (x%x)\n",
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record),
+					phba->fcf.current_rec.fcf_indx);
+		}
+		/*
+		 * Read next FCF record from HBA searching for the matching
+		 * with in-use record only if not during the fast failover
+		 * period. In case of fast failover period, it shall try to
+		 * determine whether the FCF record just read should be the
+		 * next candidate.
+		 */
+		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			spin_unlock_irq(&phba->hbalock);
+			goto read_next_fcf;
+		}
+	}
+	/*
+	 * Update on failover FCF record only if it's in FCF fast-failover
+	 * period; otherwise, update on current FCF record.
+	 */
+	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+		fcf_rec = &phba->fcf.failover_rec;
+	else
+		fcf_rec = &phba->fcf.current_rec;
+
+	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
+		/*
+		 * If the driver FCF record does not have boot flag
+		 * set and new hba fcf record has boot flag set, use
+		 * the new hba fcf record.
+		 */
+		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
+			/* Choose this FCF record */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2837 Update current FCF record "
+					"(x%x) with new FCF record (x%x)\n",
+					fcf_rec->fcf_indx,
+					bf_get(lpfc_fcf_record_fcf_index,
+					new_fcf_record));
+			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					addr_mode, vlan_id, BOOT_ENABLE);
+			spin_unlock_irq(&phba->hbalock);
+			goto read_next_fcf;
+		}
+		/*
+		 * If the driver FCF record has boot flag set and the
+		 * new hba FCF record does not have boot flag, read
+		 * the next FCF record.
+		 */
+		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
+			spin_unlock_irq(&phba->hbalock);
+			goto read_next_fcf;
+		}
+		/*
+		 * If the new hba FCF record has lower priority value
+		 * than the driver FCF record, use the new record.
+		 */
+		if (new_fcf_record->fip_priority < fcf_rec->priority) {
+			/* Choose the new FCF record with lower priority */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2838 Update current FCF record "
+					"(x%x) with new FCF record (x%x)\n",
+					fcf_rec->fcf_indx,
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record));
+			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					addr_mode, vlan_id, 0);
+			/* Reset running random FCF selection count */
+			phba->fcf.eligible_fcf_cnt = 1;
+		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
+			/* Update running random FCF selection count */
+			phba->fcf.eligible_fcf_cnt++;
+			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
+						phba->fcf.eligible_fcf_cnt);
+			if (select_new_fcf) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2839 Update current FCF record "
+					"(x%x) with new FCF record (x%x)\n",
+					fcf_rec->fcf_indx,
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record));
+				/* Choose the new FCF by random selection */
+				__lpfc_update_fcf_record(phba, fcf_rec,
+							 new_fcf_record,
+							 addr_mode, vlan_id, 0);
+			}
+		}
+		spin_unlock_irq(&phba->hbalock);
+		goto read_next_fcf;
+	}
+	/*
+	 * This is the first suitable FCF record, choose this record for
+	 * initial best-fit FCF.
+	 */
+	if (fcf_rec) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2840 Update initial FCF candidate "
+				"with FCF (x%x)\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record));
+		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					 addr_mode, vlan_id, (boot_flag ?
+					 BOOT_ENABLE : 0));
+		phba->fcf.fcf_flag |= FCF_AVAILABLE;
+		/* Setup initial running random FCF selection count */
+		phba->fcf.eligible_fcf_cnt = 1;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto read_next_fcf;
+
+read_next_fcf:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
+		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+			/*
+			 * Case of FCF fast failover scan
+			 */
+
+			/*
+			 * No suitable FCF record was found; cancel the
+			 * FCF scan in progress and do nothing.
+			 */
+			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					       "2782 No suitable FCF found: "
+					       "(x%x/x%x)\n",
+					       phba->fcoe_eventtag_at_fcf_scan,
+					       bf_get(lpfc_fcf_record_fcf_index,
+						      new_fcf_record));
+				spin_lock_irq(&phba->hbalock);
+				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+					phba->hba_flag &= ~FCF_TS_INPROG;
+					spin_unlock_irq(&phba->hbalock);
+					/* Unregister in-use FCF and rescan */
+					lpfc_printf_log(phba, KERN_INFO,
+							LOG_FIP,
+							"2864 On devloss tmo "
+							"unreg in-use FCF and "
+							"rescan FCF table\n");
+					lpfc_unregister_fcf_rescan(phba);
+					return;
+				}
+				/*
+				 * Let next new FCF event trigger fast failover
+				 */
+				phba->hba_flag &= ~FCF_TS_INPROG;
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			/*
+			 * A suitable FCF record that differs from the
+			 * in-use FCF record has been found: unregister the
+			 * in-use FCF record, replace the in-use FCF record
+			 * with the new FCF record, mark FCF fast failover
+			 * completed, and then start registering the new FCF
+			 * record.
+			 */
+
+			/* Unregister the current in-use FCF record */
+			lpfc_unregister_fcf(phba);
+
+			/* Replace in-use record with the new record */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2842 Replace in-use FCF (x%x) "
+					"with failover FCF (x%x)\n",
+					phba->fcf.current_rec.fcf_indx,
+					phba->fcf.failover_rec.fcf_indx);
+			memcpy(&phba->fcf.current_rec,
+			       &phba->fcf.failover_rec,
+			       sizeof(struct lpfc_fcf_rec));
+			/*
+			 * Mark the fast FCF failover rediscovery completed
+			 * and the start of the first round of the roundrobin
+			 * FCF failover.
+			 */
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+			spin_unlock_irq(&phba->hbalock);
+			/* Register to the new FCF record */
+			lpfc_register_fcf(phba);
+		} else {
+			/*
+			 * During the transition period to fast FCF failover,
+			 * do nothing when the search reaches the end of the
+			 * FCF table.
+			 */
+			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
+			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
+				return;
+
+			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+				phba->fcf.fcf_flag & FCF_IN_USE) {
+				/*
+				 * In case the current in-use FCF record no
+				 * longer exists during an FCF discovery that
+				 * was not triggered by the fast FCF failover
+				 * process, treat it as fast FCF failover.
+				 */
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"2841 In-use FCF record (x%x) "
+						"not reported, entering fast "
+						"FCF failover mode scanning.\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						LPFC_FCOE_FCF_GET_FIRST);
+				return;
+			}
+			/* Register to the new FCF record */
+			lpfc_register_fcf(phba);
+		}
+	} else
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
+	return;
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	lpfc_register_fcf(phba);
+
+	return;
+}
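+
+/*
+ * Note (illustrative, not driver code): a compressed map of the scan
+ * completion handler above:
+ *
+ *	parse fails           -> clear FCF_TS_INPROG, free mbox, return
+ *	conn-list check fails -> drop from priority list; if this
+ *	                         invalidates the in-use FCF outside a
+ *	                         failover window, rescan from the start
+ *	matches in-use FCF    -> set FCF_AVAILABLE and register
+ *	otherwise             -> update the candidate record: boot flag
+ *	                         wins, then lower priority, then a 1/n
+ *	                         random tie-break on equal priority
+ *	end of table reached  -> complete fast failover (swap in
+ *	                         failover_rec) or register the candidate
+ */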
+
+/**
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function for FLOGI failure roundrobin FCF failover
+ * read FCF record mailbox command from the eligible FCF record bmask for
+ * performing the failover. If the FCF read back is not valid/available, it
+ * falls through to retrying FLOGI to the currently registered FCF again.
+ * Otherwise, if the FCF read back is valid and available, it will set the
+ * newly read FCF record to the failover FCF record, unregister currently
+ * registered FCF record, copy the failover FCF record to the current
+ * FCF record, and then register the current FCF record before proceeding
+ * to trying FLOGI on the new failover FCF.
+ */
+void
+lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t next_fcf_index, fcf_index;
+	uint16_t current_fcf_index;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If link state is not up, stop the roundrobin failover process */
+	if (phba->link_state < LPFC_LINK_UP) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+		phba->hba_flag &= ~FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		goto out;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2766 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record. "
+				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
+				phba->fcf.fcf_flag);
+		lpfc_unregister_fcf_rescan(phba);
+		goto out;
+	}
+
+	/* Get the needed parameters from FCF record */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	if (!rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2848 Remove ineligible FCF (x%x) from "
+				"from roundrobin bmask\n", fcf_index);
+		/* Clear roundrobin bmask bit for ineligible FCF */
+		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+		/* Perform next round of roundrobin FCF failover */
+		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+		if (rc)
+			goto out;
+		goto error_out;
+	}
+
+	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2760 Perform FLOGI roundrobin FCF failover: "
+				"FCF (x%x) back to FCF (x%x)\n",
+				phba->fcf.current_rec.fcf_indx, fcf_index);
+		/* Wait 500 ms before retrying FLOGI to current FCF */
+		msleep(500);
+		lpfc_issue_init_vfi(phba->pport);
+		goto out;
+	}
+
+	/* Upload new FCF record to the failover FCF record */
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
+			phba->fcf.failover_rec.fcf_indx, fcf_index);
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
+				 new_fcf_record, addr_mode, vlan_id,
+				 (boot_flag ? BOOT_ENABLE : 0));
+	spin_unlock_irq(&phba->hbalock);
+
+	current_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+	/* Unregister the current in-use FCF record */
+	lpfc_unregister_fcf(phba);
+
+	/* Replace in-use record with the new record */
+	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
+	       sizeof(struct lpfc_fcf_rec));
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2783 Perform FLOGI roundrobin FCF failover: FCF "
+			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
+
+error_out:
+	lpfc_register_fcf(phba);
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function of read FCF record mailbox command for
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
+ * failover when a new FCF event happened. If the FCF read back is
+ * valid/available and it passes the connection list check, it updates
+ * the bmask for the eligible FCF record for roundrobin failover.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If link state is not up, no need to proceed */
+	if (phba->link_state < LPFC_LINK_UP)
+		goto out;
+
+	/* If FCF discovery period is over, no need to proceed */
+	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
+		goto out;
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2767 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		goto out;
+	}
+
+	/* Check the connection list for eligibility */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	if (!rc)
+		goto out;
+
+	/* Update the eligible FCF record index bmask */
+	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+
+	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vfi mailbox command.
+ */
+static void
+lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	/*
+	 * VFI not supported on interface type 0, just do the flogi.
+	 * Also continue if the VFI is in use - just use the same one.
+	 */
+	if (mboxq->u.mb.mbxStatus &&
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+			LPFC_SLI_INTF_IF_TYPE_0) &&
+	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2891 Init VFI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+
+	lpfc_initial_flogi(vport);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vfi mailbox command to initialize the VFI and
+ * VPI for the physical port.
+ */
+void
+lpfc_issue_init_vfi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+	struct lpfc_hba *phba = vport->phba;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2892 Failed to allocate "
+			"init_vfi mailbox\n");
+		return;
+	}
+	lpfc_init_vfi(mboxq, vport);
+	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
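+
+/*
+ * Note (illustrative, not driver code): the routine above follows the
+ * asynchronous mailbox lifecycle used throughout this file:
+ *
+ *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	(build the command, set mboxq->vport and mboxq->mbox_cmpl)
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ *	if (rc == MBX_NOT_FINISHED)
+ *		mempool_free(mboxq, phba->mbox_mem_pool);
+ *
+ * Only the MBX_NOT_FINISHED path frees the mailbox here; otherwise the
+ * completion handler owns it and frees it when it runs.
+ */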
+
+/**
+ * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vpi mailbox command.
+ */
+void
+lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2609 Init VPI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	/* If this port is physical port or FDISC is done, do reg_vpi */
+	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp)
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_DISCOVERY,
+					 "2731 Cannot find fabric "
+					 "controller node\n");
+		else
+			lpfc_register_new_vport(phba, vport, ndlp);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+		lpfc_initial_fdisc(vport);
+	else {
+		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2606 No NPIV Fabric support\n");
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vpi mailbox command to initialize the
+ * VPI for the vport.
+ */
+void
+lpfc_issue_init_vpi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc, vpi;
+
+	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+		vpi = lpfc_alloc_vpi(vport->phba);
+		if (!vpi) {
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_MBOX,
+					 "3303 Failed to obtain vport vpi\n");
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			return;
+		}
+		vport->vpi = vpi;
+	}
+
+	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2607 Failed to allocate "
+			"init_vpi mailbox\n");
+		return;
+	}
+	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
+	mboxq->vport = vport;
+	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
+
+/**
+ * lpfc_start_fdiscs - send fdiscs for each vport on this port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function loops through the list of vports on the @phba and issues an
+ * FDISC if possible.
+ */
+void
+lpfc_start_fdiscs(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
+			/* There is no vpi for this vport */
+			if (vports[i]->vpi > phba->max_vpi) {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_FAILED);
+				continue;
+			}
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_LINKDOWN);
+				continue;
+			}
+			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+				lpfc_issue_init_vpi(vports[i]);
+				continue;
+			}
+			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+				lpfc_initial_fdisc(vports[i]);
+			else {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_NO_FABRIC_SUPP);
+				lpfc_printf_vlog(vports[i], KERN_ERR,
+						 LOG_ELS,
+						 "0259 No NPIV "
+						 "Fabric support\n");
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_dmabuf *dmabuf = mboxq->context1;
+	struct lpfc_vport *vport = mboxq->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/*
+	 * VFI not supported for interface type 0, so ignore any mailbox
+	 * error (except VFI in use) and continue with the discovery.
+	 */
+	if (mboxq->u.mb.mbxStatus &&
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+			LPFC_SLI_INTF_IF_TYPE_0) &&
+	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "2018 REG_VFI mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 mboxq->u.mb.mbxStatus, vport->port_state);
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			/* FLOGI failed, use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			goto out_free_mem;
+		}
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		goto out_free_mem;
+	}
+
+	/* If the VFI is already registered, there is nothing else to do,
+	 * unless this was a VFI update and we are in PT2PT mode; then
+	 * we should drop through to set the port state to ready.
+	 */
+	if (vport->fc_flag & FC_VFI_REGISTERED)
+		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+		      vport->fc_flag & FC_PT2PT))
+			goto out_free_mem;
+
+	/* The VPI is implicitly registered when the VFI is registered */
+	spin_lock_irq(shost->host_lock);
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
+	vport->fc_flag |= FC_VFI_REGISTERED;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	/* In case SLI4 FC loopback test, we are ready */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE)) {
+		phba->link_state = LPFC_HBA_READY;
+		goto out_free_mem;
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
+			 "alpacnt:%d LinkState:%x topology:%x\n",
+			 vport->port_state, vport->fc_flag, vport->fc_myDID,
+			 vport->phba->alpa_map[0],
+			 phba->link_state, phba->fc_topology);
+
+	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+		/*
+		 * For private loop or for NPort pt2pt,
+		 * just start discovery and we are done.
+		 */
+		if ((vport->fc_flag & FC_PT2PT) ||
+		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+
+			/* Use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			if (vport->fc_flag & FC_PT2PT)
+				vport->port_state = LPFC_VPORT_READY;
+			else
+				lpfc_disc_start(vport);
+		} else {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		}
+	}
+
+out_free_mem:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (dmabuf) {
+		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	return;
+}
+
+static void
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+	struct lpfc_vport  *vport = pmb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct serv_parm *sp = &vport->fc_sparam;
+	uint32_t ed_tov;
+
+	/* Check for error */
+	if (mb->mbxStatus) {
+		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0319 READ_SPARAM mbxStatus error x%x "
+				 "hba state x%x>\n",
+				 mb->mbxStatus, vport->port_state);
+		lpfc_linkdown(phba);
+		goto out;
+	}
+
+	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
+	       sizeof (struct serv_parm));
+
+	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
+		ed_tov = (ed_tov + 999999) / 1000000;
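+	/* The +999999 above makes the ns-to-ms division round up, e.g.
+	 * an E_D_TOV of 2,000,001 ns becomes 3 ms rather than 2 ms.
+	 */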
+
+	phba->fc_edtov = ed_tov;
+	phba->fc_ratov = (2 * ed_tov) / 1000;
+	if (phba->fc_ratov < FF_DEF_RATOV) {
+		/* RA_TOV should be at least 10 sec for initial flogi */
+		phba->fc_ratov = FF_DEF_RATOV;
+	}
+
+	lpfc_update_vport_wwn(vport);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
+		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
+	}
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+
+out:
+	pmb->context1 = NULL;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	lpfc_issue_clear_la(phba, vport);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+static void
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+{
+	struct lpfc_vport *vport = phba->pport;
+	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+	struct Scsi_Host *shost;
+	int i;
+	struct lpfc_dmabuf *mp;
+	int rc;
+	struct fcf_record *fcf_record;
+	uint32_t fc_flags = 0;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
+
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
+		case LPFC_LINK_SPEED_1GHZ:
+		case LPFC_LINK_SPEED_2GHZ:
+		case LPFC_LINK_SPEED_4GHZ:
+		case LPFC_LINK_SPEED_8GHZ:
+		case LPFC_LINK_SPEED_10GHZ:
+		case LPFC_LINK_SPEED_16GHZ:
+		case LPFC_LINK_SPEED_32GHZ:
+			break;
+		default:
+			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
+			break;
+		}
+	}
+
+	if (phba->fc_topology &&
+	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3314 Toplogy changed was 0x%x is 0x%x\n",
+				phba->fc_topology,
+				bf_get(lpfc_mbx_read_top_topology, la));
+		phba->fc_topology_changed = 1;
+	}
+
+	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
+	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+
+	shost = lpfc_shost_from_vport(vport);
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+		/* if npiv is enabled and this adapter supports npiv, log
+		 * a message that npiv is not supported in this topology
+		 */
+		if (phba->cfg_enable_npiv && phba->max_vpi)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1309 Link Up Event npiv not supported in loop "
+				"topology\n");
+		/* Get Loop Map information */
+		if (bf_get(lpfc_mbx_read_top_il, la))
+			fc_flags |= FC_LBIT;
+
+		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
+		i = la->lilpBde64.tus.f.bdeSize;
+
+		if (i == 0) {
+			phba->alpa_map[0] = 0;
+		} else {
+			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
+				int numalpa, j, k;
+				union {
+					uint8_t pamap[16];
+					struct {
+						uint32_t wd1;
+						uint32_t wd2;
+						uint32_t wd3;
+						uint32_t wd4;
+					} pa;
+				} un;
+				numalpa = phba->alpa_map[0];
+				j = 0;
+				while (j < numalpa) {
+					memset(un.pamap, 0, 16);
+					for (k = 1; j < numalpa; k++) {
+						un.pamap[k - 1] =
+							phba->alpa_map[j + 1];
+						j++;
+						if (k == 16)
+							break;
+					}
+					/* Link Up Event ALPA map */
+					lpfc_printf_log(phba,
+							KERN_WARNING,
+							LOG_LINK_EVENT,
+							"1304 Link Up Event "
+							"ALPA map Data: x%x "
+							"x%x x%x x%x\n",
+							un.pa.wd1, un.pa.wd2,
+							un.pa.wd3, un.pa.wd4);
+				}
+			}
+		}
+	} else {
+		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+			if (phba->max_vpi && phba->cfg_enable_npiv &&
+			   (phba->sli_rev >= LPFC_SLI_REV3))
+				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+		}
+		vport->fc_myDID = phba->fc_pref_DID;
+		fc_flags |= FC_LBIT;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	if (fc_flags) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= fc_flags;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	lpfc_linkup(phba);
+	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!sparam_mbox)
+		goto out;
+
+	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
+	if (rc) {
+		mempool_free(sparam_mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+	sparam_mbox->vport = vport;
+	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(sparam_mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!cfglink_mbox)
+			goto out;
+		vport->port_state = LPFC_LOCAL_CFG_LINK;
+		lpfc_config_link(phba, cfglink_mbox);
+		cfglink_mbox->vport = vport;
+		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+			goto out;
+		}
+	} else {
+		vport->port_state = LPFC_VPORT_UNKNOWN;
+		/*
+		 * Add the driver's default FCF record at FCF index 0 now. This
+		 * is a phase 1 implementation that supports FCF index 0 and
+		 * defaults.
+		 */
+		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+			fcf_record = kzalloc(sizeof(struct fcf_record),
+					GFP_KERNEL);
+			if (unlikely(!fcf_record)) {
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"2554 Could not allocate memory for "
+					"fcf record\n");
+				rc = -ENODEV;
+				goto out;
+			}
+
+			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
+						LPFC_FCOE_FCF_DEF_INDEX);
+			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
+			if (unlikely(rc)) {
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"2013 Could not manually add FCF "
+					"record 0, status %d\n", rc);
+				rc = -ENODEV;
+				kfree(fcf_record);
+				goto out;
+			}
+			kfree(fcf_record);
+		}
+		/*
+		 * The driver is expected to do FIP/FCF. Call the port
+		 * and get the FCF Table.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG) {
+			spin_unlock_irq(&phba->hbalock);
+			return;
+		}
+		/* This is the initial FCF discovery scan */
+		phba->fcf.fcf_flag |= FCF_INIT_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2778 Start FCF table scan at linkup\n");
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
+		if (rc) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			goto out;
+		}
+		/* Reset FCF roundrobin bmask for new discovery */
+		lpfc_sli4_clear_fcf_rr_bmask(phba);
+	}
+
+	return;
+out:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+			 vport->port_state, sparam_mbox, cfglink_mbox);
+	lpfc_issue_clear_la(phba, vport);
+	return;
+}
+
+static void
+lpfc_enable_la(struct lpfc_hba *phba)
+{
+	uint32_t control;
+	struct lpfc_sli *psli = &phba->sli;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		control = readl(phba->HCregaddr);
+		control |= HC_LAINT_ENA;
+		writel(control, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
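+
+/*
+ * Note (illustrative, not driver code): the readl() marked "flush" above
+ * is the standard PCI posted-write flush; MMIO writes may sit in host
+ * bridge write buffers, and reading any register on the same device
+ * forces the preceding write to complete before the lock is released.
+ */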
+
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+	lpfc_linkdown(phba);
+	lpfc_enable_la(phba);
+	lpfc_unregister_unused_fcf(phba);
+	/* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
+/*
+ * This routine handles processing a READ_TOPOLOGY mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer. SLI4 only.
+ */
+void
+lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_mbx_read_top *la;
+	struct lpfc_sli_ring *pring;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	uint8_t attn_type;
+
+	/* Unblock ELS traffic */
+	pring = lpfc_phba_elsring(phba);
+	if (pring)
+		pring->flag &= ~LPFC_STOP_IOCB_EVENT;
+
+	/* Check for error */
+	if (mb->mbxStatus) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+				"1307 READ_LA mbox error x%x state x%x\n",
+				mb->mbxStatus, vport->port_state);
+		lpfc_mbx_issue_link_down(phba);
+		phba->link_state = LPFC_HBA_ERROR;
+		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
+	}
+
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
+
+	memcpy(&phba->alpa_map[0], mp->virt, 128);
+
+	spin_lock_irq(shost->host_lock);
+	if (bf_get(lpfc_mbx_read_top_pb, la))
+		vport->fc_flag |= FC_BYPASSED_MODE;
+	else
+		vport->fc_flag &= ~FC_BYPASSED_MODE;
+	spin_unlock_irq(shost->host_lock);
+
+	if (phba->fc_eventTag <= la->eventTag) {
+		phba->fc_stat.LinkMultiEvent++;
+		if (attn_type == LPFC_ATT_LINK_UP)
+			if (phba->fc_eventTag != 0)
+				lpfc_linkdown(phba);
+	}
+
+	phba->fc_eventTag = la->eventTag;
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		spin_lock_irq(&phba->hbalock);
+		if (bf_get(lpfc_mbx_read_top_mm, la))
+			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+		else
+			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	phba->link_events++;
+	if ((attn_type == LPFC_ATT_LINK_UP) &&
+	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+		phba->fc_stat.LinkUp++;
+		if (phba->link_flag & LS_LOOPBACK_MODE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+					"1306 Link Up Event in loop back mode "
+					"x%x received Data: x%x x%x x%x x%x\n",
+					la->eventTag, phba->fc_eventTag,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
+					phba->alpa_map[0]);
+		} else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+					"1303 Link Up Event x%x received "
+					"Data: x%x x%x x%x x%x x%x x%x %d\n",
+					la->eventTag, phba->fc_eventTag,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
+					phba->alpa_map[0],
+					bf_get(lpfc_mbx_read_top_mm, la),
+					bf_get(lpfc_mbx_read_top_fa, la),
+					phba->wait_4_mlo_maint_flg);
+		}
+		lpfc_mbx_process_link_up(phba, la);
+	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
+		   attn_type == LPFC_ATT_UNEXP_WWPN) {
+		phba->fc_stat.LinkDown++;
+		if (phba->link_flag & LS_LOOPBACK_MODE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1308 Link Down Event in loop back mode "
+				"x%x received "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1313 Link Down UNEXP WWPN Event x%x received "
+				"Data: x%x x%x x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag,
+				bf_get(lpfc_mbx_read_top_mm, la),
+				bf_get(lpfc_mbx_read_top_fa, la));
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1305 Link Down Event x%x received "
+				"Data: x%x x%x x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag,
+				bf_get(lpfc_mbx_read_top_mm, la),
+				bf_get(lpfc_mbx_read_top_fa, la));
+		lpfc_mbx_issue_link_down(phba);
+	}
+	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
+	    attn_type == LPFC_ATT_LINK_UP) {
+		if (phba->link_state != LPFC_LINK_DOWN) {
+			phba->fc_stat.LinkDown++;
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1312 Link Down Event x%x received "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+			lpfc_mbx_issue_link_down(phba);
+		} else
+			lpfc_enable_la(phba);
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1310 Menlo Maint Mode Link up Event x%x rcvd "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+		/*
+		 * The cmnd that triggered this will be waiting for this
+		 * signal.
+		 */
+		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+		if (phba->wait_4_mlo_maint_flg) {
+			phba->wait_4_mlo_maint_flg = 0;
+			wake_up_interruptible(&phba->wait_4_mlo_m_q);
+		}
+	}
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    bf_get(lpfc_mbx_read_top_fa, la)) {
+		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+			lpfc_issue_clear_la(phba, vport);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+				"1311 fa %d\n",
+				bf_get(lpfc_mbx_read_top_fa, la));
+	}
+
+lpfc_mbx_cmpl_read_topology_free_mbuf:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+			 kref_read(&ndlp->kref),
+			 ndlp->nlp_usg_map, ndlp);
+	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+
+	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+		/* We received an RSCN after issuing this
+		 * mbox reg login; we may have cycled
+		 * back through the state and be
+		 * back at reg login state, so this
+		 * mbox needs to be ignored because
+		 * there is another reg login in
+		 * process.
+		 */
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+		spin_unlock_irq(shost->host_lock);
+
+		/*
+		 * We cannot leave the RPI registered because
+		 * if we go thru discovery again for this ndlp
+		 * a subsequent REG_RPI will fail.
+		 */
+		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+		lpfc_unreg_rpi(vport, ndlp);
+	}
+
+	/* Call state machine */
+	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	/* decrement the node reference count held for this callback
+	 * function.
+	 */
+	lpfc_nlp_put(ndlp);
+
+	return;
+}
+
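+/*
+ * Completion handler for the UNREG_VPI mailbox command. It clears the
+ * vport's VPI-registered state and marks it as needing a new REG_VPI.
+ * If the firmware reports the VPI as busy (0x9700), an HBA reset is
+ * requested unless the driver is unloading.
+ */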
+static void
+lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	switch (mb->mbxStatus) {
+	case 0x0011:
+	case 0x0020:
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
+				 mb->mbxStatus);
+		break;
+	/* If VPI is busy, reset the HBA */
+	case 0x9700:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
+			vport->vpi, mb->mbxStatus);
+		if (!(phba->pport->load_flag & FC_UNLOADING))
+			lpfc_workq_post_event(phba, NULL, NULL,
+				LPFC_EVT_RESET_HBA);
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
+	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+	vport->unreg_vpi_cmpl = VPORT_OK;
+	mempool_free(pmb, phba->mbox_mem_pool);
+	lpfc_cleanup_vports_rrqs(vport, NULL);
+	/*
+	 * This shost reference might have been taken at the beginning of
+	 * lpfc_vport_delete()
+	 */
+	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
+		scsi_host_put(shost);
+}
+
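+/*
+ * Issue an UNREG_VPI mailbox command for this vport. Returns 0 on
+ * success, 1 if no mailbox memory could be allocated, or the mailbox
+ * return code if the command could not be issued.
+ */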
+int
+lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return 1;
+
+	lpfc_unreg_vpi(phba, vport->vpi, mbox);
+	mbox->vport = vport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+				 "1800 Could not issue unreg_vpi\n");
+		mempool_free(mbox, phba->mbox_mem_pool);
+		vport->unreg_vpi_cmpl = VPORT_ERROR;
+		return rc;
+	}
+	return 0;
+}
+
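+/*
+ * Completion handler for the REG_VPI mailbox command. On error the
+ * vport is placed in the FAILED state; on success the VPI is marked
+ * registered and discovery resumes by issuing PLOGIs to any nodes on
+ * the NPR list.
+ */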
+static void
+lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	MAILBOX_t *mb = &pmb->u.mb;
+
+	switch (mb->mbxStatus) {
+	case 0x0011:
+	case 0x9601:
+	case 0x9602:
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
+				 mb->mbxStatus);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+		vport->fc_myDID = 0;
+
+		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if (phba->nvmet_support)
+				lpfc_nvmet_update_targetport(phba);
+			else
+				lpfc_nvme_update_localport(vport);
+		}
+		goto out;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+	vport->num_disc_nodes = 0;
+	/* go thru NPR list and issue ELS PLOGIs */
+	if (vport->fc_npr_cnt)
+		lpfc_els_disc_plogi(vport);
+
+	if (!vport->num_disc_nodes) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NDISC_ACTIVE;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_can_disctmo(vport);
+	}
+	vport->port_state = LPFC_VPORT_READY;
+
+out:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_create_static_vport - Read HBA config region to create static vports.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a DUMP mailbox command for config region 22 to get
+ * the list of static vports to be created. The function creates vports
+ * based on the information returned from the HBA.
+ **/
+void
+lpfc_create_static_vport(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb = NULL;
+	MAILBOX_t *mb;
+	struct static_vport_info *vport_info;
+	int mbx_wait_rc = 0, i;
+	struct fc_vport_identifiers vport_id;
+	struct fc_vport *new_fc_vport;
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	uint16_t offset = 0;
+	uint8_t *vport_buff;
+	struct lpfc_dmabuf *mp;
+	uint32_t byte_count = 0;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0542 lpfc_create_static_vport failed to"
+				" allocate mailbox memory\n");
+		return;
+	}
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb = &pmb->u.mb;
+
+	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
+	if (!vport_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0543 lpfc_create_static_vport failed to"
+				" allocate vport_info\n");
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	vport_buff = (uint8_t *) vport_info;
+	do {
+		/* free dma buffer from previous round */
+		if (pmb->context1) {
+			mp = (struct lpfc_dmabuf *)pmb->context1;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		if (lpfc_dump_static_vport(phba, pmb, offset))
+			goto out;
+
+		pmb->vport = phba->pport;
+		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
+							LPFC_MBOX_TMO);
+
+		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0544 lpfc_create_static_vport failed to"
+				" issue dump mailbox command ret 0x%x "
+				"status 0x%x\n",
+				mbx_wait_rc, mb->mbxStatus);
+			goto out;
+		}
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			byte_count = pmb->u.mqe.un.mb_words[5];
+			mp = (struct lpfc_dmabuf *)pmb->context1;
+			if (byte_count > sizeof(struct static_vport_info) -
+					offset)
+				byte_count = sizeof(struct static_vport_info)
+					- offset;
+			memcpy(vport_buff + offset, mp->virt, byte_count);
+			offset += byte_count;
+		} else {
+			if (mb->un.varDmp.word_cnt >
+				sizeof(struct static_vport_info) - offset)
+				mb->un.varDmp.word_cnt =
+					sizeof(struct static_vport_info)
+						- offset;
+			byte_count = mb->un.varDmp.word_cnt;
+			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				vport_buff + offset,
+				byte_count);
+
+			offset += byte_count;
+		}
+
+	} while (byte_count &&
+		offset < sizeof(struct static_vport_info));
+
+	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
+		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
+			!= VPORT_INFO_REV)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0545 lpfc_create_static_vport bad"
+			" information header 0x%x 0x%x\n",
+			le32_to_cpu(vport_info->signature),
+			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+
+		goto out;
+	}
+
+	shost = lpfc_shost_from_vport(phba->pport);
+
+	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
+		memset(&vport_id, 0, sizeof(vport_id));
+		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
+		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
+		if (!vport_id.port_name || !vport_id.node_name)
+			continue;
+
+		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+		vport_id.vport_type = FC_PORTTYPE_NPIV;
+		vport_id.disable = false;
+		new_fc_vport = fc_vport_create(shost, 0, &vport_id);
+
+		if (!new_fc_vport) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0546 lpfc_create_static_vport failed to"
+				" create vport\n");
+			continue;
+		}
+
+		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
+		vport->vport_flag |= STATIC_VPORT;
+	}
+
+out:
+	kfree(vport_info);
+	if (mbx_wait_rc != MBX_TIMEOUT) {
+		if (pmb->context1) {
+			mp = (struct lpfc_dmabuf *)pmb->context1;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
+	return;
+}
+
+/*
+ * This routine handles processing a Fabric REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0258 Register Fabric login error: 0x%x\n",
+				 mb->mbxStatus);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			/* FLOGI failed, use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			/* Decrement the reference count on ndlp after the
+			 * references to the ndlp are done.
+			 */
+			lpfc_nlp_put(ndlp);
+			return;
+		}
+
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		/* Decrement the reference count on ndlp after the references
+		 * to the ndlp are done.
+		 */
+		lpfc_nlp_put(ndlp);
+		return;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	ndlp->nlp_type |= NLP_FABRIC;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+		/* When the physical port receives a LOGO, do not start
+		 * vport discovery */
+		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+			lpfc_start_fdiscs(phba);
+		else {
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+			spin_unlock_irq(shost->host_lock);
+		}
+		lpfc_do_scr_ns_plogi(phba, vport);
+	}
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Drop the reference count from the mbox at the end after
+	 * all the current reference to the ndlp have been done.
+	 */
+	lpfc_nlp_put(ndlp);
+	return;
+}
+
+/*
+ * This routine will issue a GID_FT for each FC4 Type supported
+ * by the driver. ALL GID_FTs must complete before discovery is started.
+ */
+int
+lpfc_issue_gidft(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+
+	/* Good status, issue CT Request to NameServer */
+	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
+		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
+			/* Cannot issue NameServer FCP Query, so finish up
+			 * discovery
+			 */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+					 "0604 %s FC_TYPE %x %s\n",
+					 "Failed to issue GID_FT to ",
+					 FC_TYPE_FCP,
+					 "Finishing discovery.");
+			return 0;
+		}
+		vport->gidft_inp++;
+	}
+
+	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+	    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
+			/* Cannot issue NameServer NVME Query, so finish up
+			 * discovery
+			 */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+					 "0605 %s FC_TYPE %x %s %d\n",
+					 "Failed to issue GID_FT to ",
+					 FC_TYPE_NVME,
+					 "Finishing discovery: gidftinp ",
+					 vport->gidft_inp);
+			if (vport->gidft_inp == 0)
+				return 0;
+		} else
+			vport->gidft_inp++;
+	}
+	return vport->gidft_inp;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_vport *vport = pmb->vport;
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+	vport->gidft_inp = 0;
+
+	if (mb->mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0260 Register NameServer error: 0x%x\n",
+				 mb->mbxStatus);
+
+out:
+		/* decrement the node reference count held for this
+		 * callback function.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+		/* If no other thread is using the ndlp, free it */
+		lpfc_nlp_not_used(ndlp);
+
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			/*
+			 * RegLogin failed, use loop map to make discovery
+			 * list
+			 */
+			lpfc_disc_list_loopmap(vport);
+
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			return;
+		}
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	ndlp->nlp_type |= NLP_FABRIC;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "0003 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+			 kref_read(&ndlp->kref),
+			 ndlp->nlp_usg_map, ndlp);
+
+	if (vport->port_state < LPFC_VPORT_READY) {
+		/* Link up discovery requires Fabric registration. */
+		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
+		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+
+		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
+
+		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
+				    FC_TYPE_NVME);
+
+		/* Issue SCR just before NameServer GID_FT Query */
+		lpfc_issue_els_scr(vport, SCR_DID, 0);
+	}
+
+	vport->fc_ns_retry = 0;
+	if (lpfc_issue_gidft(vport) == 0)
+		goto out;
+
+	/*
+	 * At this point in time we may need to wait for multiple
+	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
+	 *
+	 * decrement the node reference count held for this
+	 * callback function.
+	 */
+	lpfc_nlp_put(ndlp);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return;
+}
+
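+/*
+ * Register a remote port with the FC transport, first dropping any
+ * stale rport reference left from a previous registration, then
+ * updating the transport roles once the new rport exists. This is a
+ * no-op when the port is configured for NVME only.
+ */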
+static void
+lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct fc_rport  *rport;
+	struct lpfc_rport_data *rdata;
+	struct fc_rport_identifiers rport_ids;
+	struct lpfc_hba  *phba = vport->phba;
+
+	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+		return;
+
+	/* Remote port has reappeared. Re-register w/ FC transport */
+	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+	rport_ids.port_id = ndlp->nlp_DID;
+	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+	/*
+	 * We leave our node pointer in rport->dd_data when we unregister a
+	 * FCP target port.  But fc_remote_port_add zeros the space to which
+	 * rport->dd_data points.  So, if we're reusing a previously
+	 * registered port, drop the reference that we took the last time we
+	 * registered the port.
+	 */
+	rport = ndlp->rport;
+	if (rport) {
+		rdata = rport->dd_data;
+		/* break the link before dropping the ref */
+		ndlp->rport = NULL;
+		if (rdata) {
+			if (rdata->pnode == ndlp)
+				lpfc_nlp_put(ndlp);
+			rdata->pnode = NULL;
+		}
+		/* drop reference for earlier registration */
+		put_device(&rport->dev);
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport add:       did:x%x flg:x%x type x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+	/* Don't add the remote port if unloading. */
+	if (vport->load_flag & FC_UNLOADING)
+		return;
+
+	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
+	if (!rport || !get_device(&rport->dev)) {
+		dev_printk(KERN_WARNING, &phba->pcidev->dev,
+			   "Warning: fc_remote_port_add failed\n");
+		return;
+	}
+
+	/* initialize static port data */
+	rport->maxframe_size = ndlp->nlp_maxframe;
+	rport->supported_classes = ndlp->nlp_class_sup;
+	rdata = rport->dd_data;
+	rdata->pnode = lpfc_nlp_get(ndlp);
+
+	if (ndlp->nlp_type & NLP_FCP_TARGET)
+		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+	if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
+		fc_remote_port_rolechg(rport, rport_ids.roles);
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			 "3183 rport register x%06x, rport %p role x%x\n",
+			 ndlp->nlp_DID, rport, rport_ids.roles);
+
+	if ((rport->scsi_target_id != -1) &&
+	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
+		ndlp->nlp_sid = rport->scsi_target_id;
+	}
+	return;
+}
+
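+/*
+ * Unregister a remote port from the FC transport. This is a no-op
+ * when the port is configured for NVME only.
+ */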
+static void
+lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
+{
+	struct fc_rport *rport = ndlp->rport;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba  *phba = vport->phba;
+
+	if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+		return;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport delete:    did:x%x flg:x%x type x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "3184 rport unregister x%06x, rport %p\n",
+			 ndlp->nlp_DID, rport);
+
+	fc_remote_port_delete(rport);
+
+	return;
+}
+
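+/*
+ * Adjust the vport's per-state node counter for @state by @count
+ * (typically +1 or -1), under the host lock.
+ */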
+static void
+lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	switch (state) {
+	case NLP_STE_UNUSED_NODE:
+		vport->fc_unused_cnt += count;
+		break;
+	case NLP_STE_PLOGI_ISSUE:
+		vport->fc_plogi_cnt += count;
+		break;
+	case NLP_STE_ADISC_ISSUE:
+		vport->fc_adisc_cnt += count;
+		break;
+	case NLP_STE_REG_LOGIN_ISSUE:
+		vport->fc_reglogin_cnt += count;
+		break;
+	case NLP_STE_PRLI_ISSUE:
+		vport->fc_prli_cnt += count;
+		break;
+	case NLP_STE_UNMAPPED_NODE:
+		vport->fc_unmap_cnt += count;
+		break;
+	case NLP_STE_MAPPED_NODE:
+		vport->fc_map_cnt += count;
+		break;
+	case NLP_STE_NPR_NODE:
+		if (vport->fc_npr_cnt == 0 && count == -1)
+			vport->fc_npr_cnt = 0;
+		else
+			vport->fc_npr_cnt += count;
+		break;
+	}
+	spin_unlock_irq(shost->host_lock);
+}
+
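+/*
+ * Apply the side effects of a node state transition: adjust node
+ * flags, unregister FCP/NVME rports when a node leaves the mapped or
+ * unmapped states, register them when it enters those states, and
+ * move an FCP target back to the unmapped list when the transport did
+ * not assign it a usable SCSI target id.
+ */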
+static void
+lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		       int old_state, int new_state)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (new_state == NLP_STE_UNMAPPED_NODE) {
+		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+		ndlp->nlp_type |= NLP_FC_NODE;
+	}
+	if (new_state == NLP_STE_MAPPED_NODE)
+		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+	if (new_state == NLP_STE_NPR_NODE)
+		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+
+	/* FCP and NVME Transport interface */
+	if ((old_state == NLP_STE_MAPPED_NODE ||
+	     old_state == NLP_STE_UNMAPPED_NODE)) {
+		if (ndlp->rport) {
+			vport->phba->nport_event_cnt++;
+			lpfc_unregister_remote_port(ndlp);
+		}
+
+		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+			vport->phba->nport_event_cnt++;
+			if (vport->phba->nvmet_support == 0)
+				/* Start devloss */
+				lpfc_nvme_unregister_port(vport, ndlp);
+			else
+				/* NVMET has no upcall. */
+				lpfc_nlp_put(ndlp);
+		}
+	}
+
+	/* FCP and NVME Transport interfaces */
+
+	if (new_state == NLP_STE_MAPPED_NODE ||
+	    new_state == NLP_STE_UNMAPPED_NODE) {
+		if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+		    ndlp->nlp_DID == Fabric_DID ||
+		    ndlp->nlp_DID == NameServer_DID ||
+		    ndlp->nlp_DID == FDMI_DID) {
+			vport->phba->nport_event_cnt++;
+			/*
+			 * Tell the fc transport about the port, if we haven't
+			 * already. If we have, and it's a scsi entity, be
+			 * sure to unblock any attached scsi devices.
+			 */
+			lpfc_register_remote_port(vport, ndlp);
+		}
+		/* Notify the NVME transport of this new rport. */
+		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+			if (vport->phba->nvmet_support == 0) {
+				/* Register this rport with the transport.
+				 * Initiators take the NDLP ref count in
+				 * the register.
+				 */
+				vport->phba->nport_event_cnt++;
+				lpfc_nvme_register_port(vport, ndlp);
+			} else {
+				/* Just take an NDLP ref count since the
+				 * target does not register rports.
+				 */
+				lpfc_nlp_get(ndlp);
+			}
+		}
+	}
+
+	if ((new_state == NLP_STE_MAPPED_NODE) &&
+	    (vport->stat_data_enabled)) {
+		/*
+		 * A new target is discovered, if there is no buffer for
+		 * statistical data collection allocate buffer.
+		 */
+		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+					 sizeof(struct lpfc_scsicmd_bkt),
+					 GFP_KERNEL);
+
+		if (!ndlp->lat_data)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+				"0286 lpfc_nlp_state_cleanup failed to "
+				"allocate statistical data buffer DID "
+				"0x%x\n", ndlp->nlp_DID);
+	}
+	/*
+	 * If the node just added to Mapped list was an FCP target,
+	 * but the remote port registration failed or assigned a target
+	 * id outside the presentable range - move the node to the
+	 * Unmapped List.
+	 */
+	if ((new_state == NLP_STE_MAPPED_NODE) &&
+	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (!ndlp->rport ||
+	     ndlp->rport->scsi_target_id == -1 ||
+	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	}
+}
+
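+/*
+ * Copy a printable name for the given node state into @buffer and
+ * return the buffer, for use in log messages.
+ */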
+static char *
+lpfc_nlp_state_name(char *buffer, size_t size, int state)
+{
+	static char *states[] = {
+		[NLP_STE_UNUSED_NODE] = "UNUSED",
+		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
+		[NLP_STE_ADISC_ISSUE] = "ADISC",
+		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
+		[NLP_STE_PRLI_ISSUE] = "PRLI",
+		[NLP_STE_LOGO_ISSUE] = "LOGO",
+		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
+		[NLP_STE_MAPPED_NODE] = "MAPPED",
+		[NLP_STE_NPR_NODE] = "NPR",
+	};
+
+	if (state < NLP_STE_MAX_STATE && states[state])
+		strlcpy(buffer, states[state], size);
+	else
+		snprintf(buffer, size, "unknown (%d)", state);
+	return buffer;
+}
+
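+/*
+ * Transition a node to a new discovery state: update the per-state
+ * counters, add the node to the vport list if it is not already there
+ * and let lpfc_nlp_state_cleanup() handle the transport side effects.
+ */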
+void
+lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		   int state)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	int  old_state = ndlp->nlp_state;
+	char name1[16], name2[16];
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "0904 NPort state transition x%06x, %s -> %s\n",
+			 ndlp->nlp_DID,
+			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
+			 lpfc_nlp_state_name(name2, sizeof(name2), state));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node statechg    did:x%x old:%d ste:%d",
+		ndlp->nlp_DID, old_state, state);
+
+	if (old_state == NLP_STE_NPR_NODE &&
+	    state != NLP_STE_NPR_NODE)
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (old_state == NLP_STE_UNMAPPED_NODE) {
+		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+		ndlp->nlp_type &= ~NLP_FC_NODE;
+	}
+
+	if (list_empty(&ndlp->nlp_listp)) {
+		spin_lock_irq(shost->host_lock);
+		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+		spin_unlock_irq(shost->host_lock);
+	} else if (old_state)
+		lpfc_nlp_counters(vport, old_state, -1);
+
+	ndlp->nlp_state = state;
+	lpfc_nlp_counters(vport, state, 1);
+	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
+}
+
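+/* Add the node to the vport's fc_nodes list if it is not already on it. */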
+void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (list_empty(&ndlp->nlp_listp)) {
+		spin_lock_irq(shost->host_lock);
+		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+		spin_unlock_irq(shost->host_lock);
+	}
+}
+
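+/*
+ * Remove the node from the vport's fc_nodes list, fixing up the
+ * per-state counters and running the UNUSED-state cleanup.
+ */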
+void
+lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+	spin_lock_irq(shost->host_lock);
+	list_del_init(&ndlp->nlp_listp);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+				NLP_STE_UNUSED_NODE);
+}
+
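+/*
+ * Like lpfc_dequeue_node(), but the node is left on the vport list;
+ * only the counters and transport state are cleaned up.
+ */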
+static void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+				NLP_STE_UNUSED_NODE);
+}
+
+/**
+ * lpfc_initialize_node - Initialize all fields of node object
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ *
+ * This function is always called when a node object needs to be initialized.
+ * It initializes all the fields of the node object. Although the reference
+ * to phba from @ndlp can be obtained indirectly through its reference to
+ * @vport, a direct reference to phba is taken here by @ndlp because the
+ * life-span of @ndlp might go beyond the existence of @vport: the final
+ * release of ndlp is determined by its reference count, and operations on
+ * @ndlp need the reference to phba.
+ **/
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	uint32_t did)
+{
+	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+	setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+			(unsigned long)ndlp);
+	ndlp->nlp_DID = did;
+	ndlp->vport = vport;
+	ndlp->phba = vport->phba;
+	ndlp->nlp_sid = NLP_NO_SID;
+	ndlp->nlp_fc4_type = NLP_FC4_NONE;
+	kref_init(&ndlp->kref);
+	NLP_INT_NODE_ACT(ndlp);
+	atomic_set(&ndlp->cmd_pending, 0);
+	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+}
+
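+/*
+ * Bring an inactive node back into use: allocate a fresh RPI on SLI4,
+ * reinitialize the node object while preserving its DID and RRQ
+ * bitmap, and move it to the requested state. Returns the node on
+ * success or NULL on failure (e.g. the node is being freed or is
+ * already active).
+ */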
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 int state)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t did;
+	unsigned long flags;
+	unsigned long *active_rrqs_xri_bitmap = NULL;
+	int rpi = LPFC_RPI_ALLOC_ERROR;
+
+	if (!ndlp)
+		return NULL;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rpi = lpfc_sli4_alloc_rpi(vport->phba);
+		if (rpi == LPFC_RPI_ALLOC_ERROR)
+			return NULL;
+	}
+
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	/* The ndlp should not be in memory free mode */
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0277 lpfc_enable_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+		goto free_rpi;
+	}
+	/* The ndlp should not already be in active mode */
+	if (NLP_CHK_NODE_ACT(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0278 lpfc_enable_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+		goto free_rpi;
+	}
+
+	/* Keep the original DID */
+	did = ndlp->nlp_DID;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
+
+	/* re-initialize ndlp except of ndlp linked list pointer */
+	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+	lpfc_initialize_node(vport, ndlp, did);
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
+
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+		ndlp->nlp_rpi = rpi;
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
+				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_flag,
+				 kref_read(&ndlp->kref),
+				 ndlp->nlp_usg_map, ndlp);
+	}
+
+	if (state != NLP_STE_UNUSED_NODE)
+		lpfc_nlp_set_state(vport, ndlp, state);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node enable:       did:x%x",
+		ndlp->nlp_DID, 0, 0);
+	return ndlp;
+
+free_rpi:
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(vport->phba, rpi);
+	return NULL;
+}
+
+void
+lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	/*
+	 * Use of lpfc_drop_node and the UNUSED list: lpfc_drop_node should
+	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
+	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
+	 * list until ALL other outstanding threads have completed. We check
+	 * that the ndlp is not already in the UNUSED state before we proceed.
+	 */
+	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+		return;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_cleanup_vports_rrqs(vport, ndlp);
+		lpfc_unreg_rpi(vport, ndlp);
+	}
+
+	lpfc_nlp_put(ndlp);
+	return;
+}
+
+/*
+ * Start / ReStart rescue timer for Discovery / RSCN handling
+ */
+void
+lpfc_set_disctmo(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	uint32_t tmo;
+
+	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
+		/* For FAN, timeout should be greater than edtov */
+		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
+	} else {
+		/* Normal discovery timeout should be > than ELS/CT timeout
+		 * FC spec states we need 3 * ratov for CT requests
+		 */
+		tmo = ((phba->fc_ratov * 3) + 3);
+	}
+
+	if (!timer_pending(&vport->fc_disctmo)) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+			"set disc timer:  tmo:x%x state:x%x flg:x%x",
+			tmo, vport->port_state, vport->fc_flag);
+	}
+
+	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_DISC_TMO;
+	spin_unlock_irq(shost->host_lock);
+
+	/* Start Discovery Timer state <hba_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0247 Start Discovery Timer state x%x "
+			 "Data: x%x x%lx x%x x%x\n",
+			 vport->port_state, tmo,
+			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
+			 vport->fc_adisc_cnt);
+
+	return;
+}
+
+/*
+ * Cancel rescue timer for Discovery / RSCN handling
+ */
+int
+lpfc_can_disctmo(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	unsigned long iflags;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"can disc timer:  state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	/* Turn off discovery timer if its running */
+	if (vport->fc_flag & FC_DISC_TMO) {
+		spin_lock_irqsave(shost->host_lock, iflags);
+		vport->fc_flag &= ~FC_DISC_TMO;
+		spin_unlock_irqrestore(shost->host_lock, iflags);
+		del_timer_sync(&vport->fc_disctmo);
+		spin_lock_irqsave(&vport->work_port_lock, iflags);
+		vport->work_port_events &= ~WORKER_DISC_TMO;
+		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
+	}
+
+	/* Cancel Discovery Timer state <hba_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0248 Cancel Discovery Timer state x%x "
+			 "Data: x%x x%x x%x\n",
+			 vport->port_state, vport->fc_flag,
+			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
+	return 0;
+}
+
+/*
+ * Check specified ring for outstanding IOCB on the SLI queue
+ * Return true if iocb matches the specified nport
+ */
+int
+lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+		    struct lpfc_sli_ring *pring,
+		    struct lpfc_iocbq *iocb,
+		    struct lpfc_nodelist *ndlp)
+{
+	IOCB_t *icmd = &iocb->iocb;
+	struct lpfc_vport    *vport = ndlp->vport;
+
+	if (iocb->vport != vport)
+		return 0;
+
+	if (pring->ringno == LPFC_ELS_RING) {
+		switch (icmd->ulpCommand) {
+		case CMD_GEN_REQUEST64_CR:
+			if (iocb->context_un.ndlp == ndlp)
+				return 1;
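+			/* fall through */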
+		case CMD_ELS_REQUEST64_CR:
+			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+				return 1;
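+			/* fall through */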
+		case CMD_XMIT_ELS_RSP64_CX:
+			if (iocb->context1 == (uint8_t *) ndlp)
+				return 1;
+		}
+	} else if (pring->ringno == LPFC_FCP_RING) {
+		/* Skip match check if waiting to relogin to FCP target */
+		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			return 0;
+		}
+		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
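+/*
+ * Move every iocb on the ring's txq that matches the given nport onto
+ * @dequeue_list. The caller holds the appropriate ring lock.
+ */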
+static void
+__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
+		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
+		struct list_head *dequeue_list)
+{
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		/* Check to see if iocb matches the nport */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+			/* match, dequeue */
+			list_move_tail(&iocb->list, dequeue_list);
+	}
+}
+
+static void
+lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
+		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t i;
+
+	spin_lock_irq(&phba->hbalock);
+	for (i = 0; i < psli->num_rings; i++)
+		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
+						dequeue_list);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
+		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
+{
+	struct lpfc_sli_ring *pring;
+	struct lpfc_queue *qp = NULL;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+		pring = qp->pring;
+		if (!pring)
+			continue;
+		spin_lock(&pring->ring_lock);
+		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
+		spin_unlock(&pring->ring_lock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with nlp_rpi in the LPFC_NODELIST entry.
+ */
+static int
+lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+
+	lpfc_fabric_abort_nport(ndlp);
+
+	/*
+	 * Everything that matches on txcmplq will be returned
+	 * by firmware with a no rpi error.
+	 */
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+		if (phba->sli_rev != LPFC_SLI_REV4)
+			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
+		else
+			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
+	}
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+
+	return 0;
+}
+
+/**
+ * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function will issue an ELS LOGO command after completing
+ * the UNREG_RPI.
+ **/
+static void
+lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = (struct lpfc_nodelist *)(pmb->context1);
+	if (!ndlp)
+		return;
+	lpfc_issue_els_logo(vport, ndlp, 0);
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/*
+ * Free rpi associated with LPFC_NODELIST entry.
+ * This routine is called from lpfc_freenode(), when we are removing
+ * an LPFC_NODELIST entry. It is also called if the driver initiates a
+ * LOGO that completes successfully and we are waiting to PLOGI back
+ * to the remote NPort. In addition, it is called after we receive
+ * an unsolicited ELS cmd, send back a rsp, the rsp completes and
+ * we are waiting to PLOGI back to the remote NPort.
+ */
+int
+lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t    *mbox;
+	int rc, acc_plogi = 1;
+	uint16_t rpi;
+
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+					 "3366 RPI x%x needs to be "
+					 "unregistered nlp_flag x%x "
+					 "did x%x\n",
+					 ndlp->nlp_rpi, ndlp->nlp_flag,
+					 ndlp->nlp_DID);
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mbox) {
+			/* SLI4 ports require the physical rpi value. */
+			rpi = ndlp->nlp_rpi;
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
+			mbox->vport = vport;
+			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+				mbox->context1 = ndlp;
+				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+			} else {
+				if (phba->sli_rev == LPFC_SLI_REV4 &&
+				    (!(vport->load_flag & FC_UNLOADING)) &&
+				    (bf_get(lpfc_sli_intf_if_type,
+				     &phba->sli4_hba.sli_intf) >=
+				      LPFC_SLI_INTF_IF_TYPE_2) &&
+				    (kref_read(&ndlp->kref) > 0)) {
+					mbox->context1 = lpfc_nlp_get(ndlp);
+					mbox->mbox_cmpl =
+						lpfc_sli4_unreg_rpi_cmpl_clr;
+					/*
+					 * accept PLOGIs after unreg_rpi_cmpl
+					 */
+					acc_plogi = 0;
+				} else
+					mbox->mbox_cmpl =
+						lpfc_sli_def_mbox_cmpl;
+			}
+
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+				acc_plogi = 1;
+			}
+		}
+		lpfc_no_rpi(phba, ndlp);
+
+		if (phba->sli_rev != LPFC_SLI_REV4)
+			ndlp->nlp_rpi = 0;
+		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		if (acc_plogi)
+			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+		return 1;
+	}
+	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+	return 0;
+}
+
+/**
+ * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unregister all the currently registered RPIs
+ * to the HBA.
+ **/
+void
+lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (!vports) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2884 Vport array allocation failed\n");
+		return;
+	}
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		shost = lpfc_shost_from_vport(vports[i]);
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+				/* The mempool_alloc might sleep */
+				spin_unlock_irq(shost->host_lock);
+				lpfc_unreg_rpi(vports[i], ndlp);
+				spin_lock_irq(shost->host_lock);
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
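+/*
+ * Unregister every RPI logged in on this vport. SLI4 ports use the
+ * dedicated unreg-all mailbox path; earlier revisions issue a single
+ * UNREG_LOGIN covering all RPIs on the vport.
+ */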
+void
+lpfc_unreg_all_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_sli4_unreg_all_rpis(vport);
+		return;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+				 mbox);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+
+		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+				"1836 Could not issue "
+				"unreg_login(all_rpis) status %d\n", rc);
+	}
+}
+
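+/* Unregister the firmware-allocated default RPIs for this vport. */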
+void
+lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	/* Unreg DID is an SLI3 operation. */
+	if (phba->sli_rev > LPFC_SLI_REV3)
+		return;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+			       mbox);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+
+		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+					 "1815 Could not issue "
+					 "unreg_did (default rpis) status %d\n",
+					 rc);
+	}
+}
+
+/*
+ * Free resources associated with LPFC_NODELIST entry
+ * so it can be freed.
+ */
+static int
+lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mb, *nextmb;
+	struct lpfc_dmabuf *mp;
+
+	/* Cleanup node for NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "0900 Cleanup node for NPort x%x "
+			 "Data: x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag,
+			 ndlp->nlp_state, ndlp->nlp_rpi);
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0280 lpfc_cleanup_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+		lpfc_dequeue_node(vport, ndlp);
+	} else {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0281 lpfc_cleanup_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+		lpfc_disable_node(vport, ndlp);
+	}
+
+	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
+
+	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+	if ((mb = phba->sli.mbox_active)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
+		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			mb->context2 = NULL;
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	/* Cleanup REG_LOGIN completions which are not yet processed */
+	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
+			(ndlp != (struct lpfc_nodelist *) mb->context2))
+			continue;
+
+		mb->context2 = NULL;
+		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	}
+
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
+		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			list_del(&mb->list);
+			mempool_free(mb, phba->mbox_mem_pool);
+			/* We shall not invoke the lpfc_nlp_put to decrement
+			 * the ndlp reference count as we are in the process
+			 * of lpfc_nlp_release.
+			 */
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_els_abort(phba, ndlp);
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+
+	ndlp->nlp_last_elscmd = 0;
+	del_timer_sync(&ndlp->nlp_delayfunc);
+
+	list_del_init(&ndlp->els_retry_evt.evt_listp);
+	list_del_init(&ndlp->dev_loss_evt.evt_listp);
+	lpfc_cleanup_vports_rrqs(vport, ndlp);
+	lpfc_unreg_rpi(vport, ndlp);
+
+	return 0;
+}
+
+/*
+ * Check to see if we can free the nlp back to the freelist.
+ * If we are in the middle of using the nlp in the discovery state
+ * machine, defer the free till we reach the end of the state machine.
+ */
+static void
+lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_rport_data *rdata;
+	struct fc_rport *rport;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+	    phba->sli_rev != LPFC_SLI_REV4) {
+		/* For this case we need to cleanup the default rpi
+		 * allocated by the firmware.
+		 */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+				 kref_read(&ndlp->kref),
+				 ndlp->nlp_usg_map, ndlp);
+		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
+			!= NULL) {
+			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
+			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
+			if (rc) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+			} else {
+				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+				mbox->vport = vport;
+				mbox->context2 = ndlp;
+				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+				if (rc == MBX_NOT_FINISHED) {
+					mempool_free(mbox, phba->mbox_mem_pool);
+				}
+			}
+		}
+	}
+	lpfc_cleanup_node(vport, ndlp);
+
+	/*
+	 * ndlp->rport must be set to NULL before we reach here,
+	 * i.e. the rport/node link is broken before the lpfc_nlp_put
+	 * for a registered rport, and only then is the rport reference
+	 * dropped.
+	 */
+	if (ndlp->rport) {
+		/*
+		 * An extra lpfc_nlp_put already dropped the ndlp reference
+		 * for the registered rport, so the rport must be cleaned
+		 * up here.
+		 */
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0940 removed node x%p DID x%x "
+				" rport not null %p\n",
+				ndlp, ndlp->nlp_DID, ndlp->rport);
+		rport = ndlp->rport;
+		rdata = rport->dd_data;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		put_device(&rport->dev);
+	}
+}
+
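+/*
+ * Check whether @did matches the node, either directly or via the
+ * private-loop rules that treat a zero area and domain specially.
+ */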
+static int
+lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      uint32_t did)
+{
+	D_ID mydid, ndlpdid, matchdid;
+
+	if (did == Bcast_DID)
+		return 0;
+
+	/* First check for Direct match */
+	if (ndlp->nlp_DID == did)
+		return 1;
+
+	/* Next check for area/domain identically equals 0 match */
+	mydid.un.word = vport->fc_myDID;
+	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
+		return 0;
+	}
+
+	matchdid.un.word = did;
+	ndlpdid.un.word = ndlp->nlp_DID;
+	if (matchdid.un.b.id == ndlpdid.un.b.id) {
+		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
+		    (mydid.un.b.area == matchdid.un.b.area)) {
+			/* This code is supposed to match the ID
+			 * for a private loop device that is
+			 * connected to an fl_port. But we need to
+			 * check that the port did not just go
+			 * from pt2pt to fabric or we could end
+			 * up matching ndlp->nlp_DID 000001 to
+			 * fabric DID 0x20101
+			 */
+			if ((ndlpdid.un.b.domain == 0) &&
+			    (ndlpdid.un.b.area == 0)) {
+				if (ndlpdid.un.b.id &&
+				    vport->phba->fc_topology ==
+				    LPFC_TOPOLOGY_LOOP)
+					return 1;
+			}
+			return 0;
+		}
+
+		matchdid.un.word = ndlp->nlp_DID;
+		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
+		    (mydid.un.b.area == ndlpdid.un.b.area)) {
+			if ((matchdid.un.b.domain == 0) &&
+			    (matchdid.un.b.area == 0)) {
+				if (matchdid.un.b.id)
+					return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+/* Search for a nodelist entry */
+static struct lpfc_nodelist *
+__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+{
+	struct lpfc_nodelist *ndlp;
+	uint32_t data1;
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (lpfc_matchdid(vport, ndlp, did)) {
+			data1 = (((uint32_t) ndlp->nlp_state << 24) |
+				 ((uint32_t) ndlp->nlp_xri << 16) |
+				 ((uint32_t) ndlp->nlp_type << 8) |
+				 ((uint32_t) ndlp->nlp_rpi & 0xff));
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+					 "0929 FIND node DID "
+					 "Data: x%p x%x x%x x%x %p\n",
+					 ndlp, ndlp->nlp_DID,
+					 ndlp->nlp_flag, data1,
+					 ndlp->active_rrqs_xri_bitmap);
+			return ndlp;
+		}
+	}
+
+	/* FIND node did <did> NOT FOUND */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "0932 FIND node did x%x NOT FOUND.\n", did);
+	return NULL;
+}
+
+struct lpfc_nodelist *
+lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
+
+	spin_lock_irqsave(shost->host_lock, iflags);
+	ndlp = __lpfc_findnode_did(vport, did);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
+	return ndlp;
+}
+
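+/*
+ * Find or allocate a node for @did and mark it for discovery. Returns
+ * NULL when the node does not need to be (re)discovered, for example
+ * when an RSCN is being processed and this DID is not affected by it.
+ */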
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = lpfc_findnode_did(vport, did);
+	if (!ndlp) {
+		if (vport->phba->nvmet_support)
+			return NULL;
+		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+		    lpfc_rscn_payload_check(vport, did) == 0)
+			return NULL;
+		ndlp = lpfc_nlp_init(vport, did);
+		if (!ndlp)
+			return NULL;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp;
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		if (vport->phba->nvmet_support)
+			return NULL;
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+		if (!ndlp)
+			return NULL;
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp;
+	}
+
+	/* The NVME Target does not want to actively manage an rport.
+	 * The goal is to allow the target to reset its state and clear
+	 * pending IO in preparation for the initiator to recover.
+	 */
+	if ((vport->fc_flag & FC_RSCN_MODE) &&
+	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+		if (lpfc_rscn_payload_check(vport, did)) {
+
+			/* Since this node is marked for discovery,
+			 * delay timeout is not needed.
+			 */
+			lpfc_cancel_retry_delay_tmo(vport, ndlp);
+
+			/* NVME Target mode waits until rport is known to be
+			 * impacted by the RSCN before it transitions.  No
+			 * active management - just go to NPR provided the
+			 * node had a valid login.
+			 */
+			if (vport->phba->nvmet_support)
+				return ndlp;
+
+			/* If we've already received a PLOGI from this NPort
+			 * we don't need to try to discover it again.
+			 */
+			if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+			    !(ndlp->nlp_type &
+			     (NLP_FCP_TARGET | NLP_NVME_TARGET)))
+				return NULL;
+
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+		} else
+			ndlp = NULL;
+	} else {
+		/* If the initiator received a PLOGI from this NPort or if the
+		 * initiator is already in the process of discovery on it,
+		 * there's no need to try to discover it again.
+		 */
+		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
+		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+		    (!vport->phba->nvmet_support &&
+		     ndlp->nlp_flag & NLP_RCV_PLOGI))
+			return NULL;
+
+		if (vport->phba->nvmet_support)
+			return ndlp;
+
+		/* Moving to NPR state clears unsolicited flags and
+		 * allows for rediscovery
+		 */
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return ndlp;
+}
+
+/* Build a list of nodes to discover based on the loopmap */
+void
+lpfc_disc_list_loopmap(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	int j;
+	uint32_t alpa, index;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+		return;
+
+	/* Check for loop map present or not */
+	if (phba->alpa_map[0]) {
+		for (j = 1; j <= phba->alpa_map[0]; j++) {
+			alpa = phba->alpa_map[j];
+			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
+				continue;
+			lpfc_setup_disc_node(vport, alpa);
+		}
+	} else {
+		/* No alpamap, so try all alpa's */
+		for (j = 0; j < FC_MAXLOOP; j++) {
+			/* If cfg_scan_down is set, start from highest
+			 * ALPA (0xef) to lowest (0x1).
+			 */
+			if (vport->cfg_scan_down)
+				index = j;
+			else
+				index = FC_MAXLOOP - j - 1;
+			alpa = lpfcAlpaArray[index];
+			if ((vport->fc_myDID & 0xff) == alpa)
+				continue;
+			lpfc_setup_disc_node(vport, alpa);
+		}
+	}
+	return;
+}
+
+/* SLI3 only */
+void
+lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
+	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
+	int  rc;
+
+	/*
+	 * If it's not a physical port, or if we have already sent
+	 * clear_la, don't send it again.
+	 */
+	if ((phba->link_state >= LPFC_CLEAR_LA) ||
+	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
+		(phba->sli_rev == LPFC_SLI_REV4))
+		return;
+
+	/* Link up discovery */
+	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
+		phba->link_state = LPFC_CLEAR_LA;
+		lpfc_clear_la(phba, mbox);
+		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+		mbox->vport = vport;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			lpfc_disc_flush_list(vport);
+			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+			phba->link_state = LPFC_HBA_ERROR;
+		}
+	}
+}
+
+/* Reg_vpi to tell firmware to resume normal operations */
+void
+lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *regvpimbox;
+
+	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (regvpimbox) {
+		lpfc_reg_vpi(vport, regvpimbox);
+		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
+		regvpimbox->vport = vport;
+		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
+					== MBX_NOT_FINISHED) {
+			mempool_free(regvpimbox, phba->mbox_mem_pool);
+		}
+	}
+}
+
+/* Start Link up / RSCN discovery on NPR nodes */
+void
+lpfc_disc_start(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	uint32_t num_sent;
+	uint32_t clear_la_pending;
+
+	if (!lpfc_is_link_up(phba)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+				 "3315 Link is not up %x\n",
+				 phba->link_state);
+		return;
+	}
+
+	if (phba->link_state == LPFC_CLEAR_LA)
+		clear_la_pending = 1;
+	else
+		clear_la_pending = 0;
+
+	if (vport->port_state < LPFC_VPORT_READY)
+		vport->port_state = LPFC_DISC_AUTH;
+
+	lpfc_set_disctmo(vport);
+
+	vport->fc_prevDID = vport->fc_myDID;
+	vport->num_disc_nodes = 0;
+
+	/* Start Discovery state <hba_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0202 Start Discovery hba state x%x "
+			 "Data: x%x x%x x%x\n",
+			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
+			 vport->fc_adisc_cnt);
+
+	/* First do ADISCs - if any */
+	num_sent = lpfc_els_disc_adisc(vport);
+
+	if (num_sent)
+		return;
+
+	/* Register the VPI for SLI3, NPIV only. */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    !(vport->fc_flag & FC_PT2PT) &&
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
+		lpfc_issue_clear_la(phba, vport);
+		lpfc_issue_reg_vpi(phba, vport);
+		return;
+	}
+
+	/*
+	 * For SLI2, we need to set port_state to READY and continue
+	 * discovery.
+	 */
+	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
+		/* If we get here, there is nothing to ADISC */
+		lpfc_issue_clear_la(phba, vport);
+
+		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+			vport->num_disc_nodes = 0;
+			/* go thru NPR nodes and issue ELS PLOGIs */
+			if (vport->fc_npr_cnt)
+				lpfc_els_disc_plogi(vport);
+
+			if (!vport->num_disc_nodes) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+			}
+		}
+		vport->port_state = LPFC_VPORT_READY;
+	} else {
+		/* Next do PLOGIs - if any */
+		num_sent = lpfc_els_disc_plogi(vport);
+
+		if (num_sent)
+			return;
+
+		if (vport->fc_flag & FC_RSCN_MODE) {
+			/* Check to see if more RSCNs came in while we
+			 * were processing this one.
+			 */
+			if ((vport->fc_rscn_id_cnt == 0) &&
+			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_RSCN_MODE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+			} else
+				lpfc_els_handle_rscn(vport);
+		}
+	}
+	return;
+}
+
+/*
+ * Ignore completion for all IOCBs on the tx and txcmpl queues of the
+ * ELS ring that match the specified nodelist.
+ */
+static void
+lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli;
+	IOCB_t     *icmd;
+	struct lpfc_iocbq    *iocb, *next_iocb;
+	struct lpfc_sli_ring *pring;
+
+	psli = &phba->sli;
+	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return;
+
+	/* Error matching iocb on txq or txcmplq
+	 * First check the txq.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		if (iocb->context1 != ndlp) {
+			continue;
+		}
+		icmd = &iocb->iocb;
+		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+			list_move_tail(&iocb->list, &completions);
+		}
+	}
+
+	/* Next check the txcmplq */
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		if (iocb->context1 != ndlp) {
+			continue;
+		}
+		icmd = &iocb->iocb;
+		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
+		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
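+
+/*
+ * Editorial sketch (not driver code): lpfc_free_tx() above uses a common
+ * two-phase pattern - collect matching requests onto a private list while
+ * holding the lock, then run the completions with the lock dropped, so
+ * completion handlers can never deadlock on the queue lock. A minimal
+ * userspace analogue with assumed names follows.
+ */
+#if 0	/* illustrative only; pthread analogue of the hbalock pattern */
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+struct req {
+	struct req *next;
+	int owner;			/* stands in for iocb->context1 */
+};
+
+static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
+static struct req *txq;		/* pending queue, like pring->txq */
+
+static void flush_owner(int owner)
+{
+	struct req *completions = NULL, **pp, *r;
+
+	/* Phase 1: under the lock, unlink matching requests onto a
+	 * private list; no completion code runs here. */
+	pthread_mutex_lock(&qlock);
+	for (pp = &txq; (r = *pp) != NULL;) {
+		if (r->owner == owner) {
+			*pp = r->next;
+			r->next = completions;
+			completions = r;
+		} else {
+			pp = &r->next;
+		}
+	}
+	pthread_mutex_unlock(&qlock);
+
+	/* Phase 2: complete (here: free) with no lock held, mirroring
+	 * lpfc_sli_cancel_iocbs() on the local completions list. */
+	while ((r = completions) != NULL) {
+		completions = r->next;
+		printf("cancelled request of owner %d\n", r->owner);
+		free(r);
+	}
+}
+#endif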
+
+static void
+lpfc_disc_flush_list(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
+				lpfc_free_tx(phba, ndlp);
+			}
+		}
+	}
+}
+
+void
+lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+{
+	lpfc_els_flush_rscn(vport);
+	lpfc_els_flush_cmd(vport);
+	lpfc_disc_flush_list(vport);
+}
+
+/*****************************************************************************/
+/*
+ * NAME:     lpfc_disc_timeout
+ *
+ * FUNCTION: Fibre Channel driver discovery timeout routine.
+ *
+ * EXECUTION ENVIRONMENT: interrupt only
+ *
+ * CALLED FROM:
+ *      Timer function
+ *
+ * RETURNS:
+ *      none
+ */
+/*****************************************************************************/
+void
+lpfc_disc_timeout(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long flags = 0;
+
+	if (unlikely(!phba))
+		return;
+
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_DISC_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
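+
+/*
+ * Editorial sketch (not driver code): the timeout routine above posts the
+ * WORKER_DISC_TMO event under work_port_lock and wakes the worker only on
+ * the first posting, so repeated timer fires are merged into one wakeup.
+ * A hedged pthread analogue with assumed names:
+ */
+#if 0	/* illustrative only */
+#include <pthread.h>
+#include <stdbool.h>
+
+#define WORKER_DISC_TMO_BIT	0x1	/* assumed event bit */
+
+static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t work_cv = PTHREAD_COND_INITIALIZER;
+static unsigned int work_events;
+
+static void post_disc_tmo(void)
+{
+	bool already_posted;
+
+	pthread_mutex_lock(&work_lock);
+	already_posted = work_events & WORKER_DISC_TMO_BIT;
+	work_events |= WORKER_DISC_TMO_BIT;
+	pthread_mutex_unlock(&work_lock);
+
+	/* Only the 0 -> 1 transition wakes the worker. */
+	if (!already_posted)
+		pthread_cond_signal(&work_cv);
+}
+#endif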
+
+static void
+lpfc_disc_timeout_handler(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli  *psli = &phba->sli;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	LPFC_MBOXQ_t *initlinkmbox;
+	int rc, clrlaerr = 0;
+
+	if (!(vport->fc_flag & FC_DISC_TMO))
+		return;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_DISC_TMO;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"disc timeout:    state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	switch (vport->port_state) {
+
+	case LPFC_LOCAL_CFG_LINK:
+		/*
+		 * port_state is identically LPFC_LOCAL_CFG_LINK while
+		 * waiting for the FAN timeout.
+		 */
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+				 "0221 FAN timeout\n");
+
+		/* Start discovery by sending FLOGI, clean up old rpis */
+		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
+				continue;
+			if (ndlp->nlp_type & NLP_FABRIC) {
+				/* Clean up the ndlp on Fabric connections */
+				lpfc_drop_node(vport, ndlp);
+
+			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+				/* Fail outstanding IO now since device
+				 * is marked for PLOGI.
+				 */
+				lpfc_unreg_rpi(vport, ndlp);
+			}
+		}
+		if (vport->port_state != LPFC_FLOGI) {
+			if (phba->sli_rev <= LPFC_SLI_REV3)
+				lpfc_initial_flogi(vport);
+			else
+				lpfc_issue_init_vfi(vport);
+			return;
+		}
+		break;
+
+	case LPFC_FDISC:
+	case LPFC_FLOGI:
+	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+		/* Initial FLOGI timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0222 Initial %s timeout\n",
+				 vport->vpi ? "FDISC" : "FLOGI");
+
+		/* Assume no Fabric and go on with discovery.
+		 * Check for outstanding ELS FLOGI to abort.
+		 */
+
+		/* FLOGI failed, so just use loop map to make discovery list */
+		lpfc_disc_list_loopmap(vport);
+
+		/* Start discovery */
+		lpfc_disc_start(vport);
+		break;
+
+	case LPFC_FABRIC_CFG_LINK:
+	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
+	   NameServer login */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0223 Timeout while waiting for "
+				 "NameServer login\n");
+		/* Next look for NameServer ndlp */
+		ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+			lpfc_els_abort(phba, ndlp);
+
+		/* ReStart discovery */
+		goto restart_disc;
+
+	case LPFC_NS_QRY:
+	/* Check for wait for NameServer Rsp timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0224 NameServer Query timeout "
+				 "Data: x%x x%x\n",
+				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+			/* Try it one more time */
+			vport->fc_ns_retry++;
+			vport->gidft_inp = 0;
+			rc = lpfc_issue_gidft(vport);
+			if (rc == 0)
+				break;
+		}
+		vport->fc_ns_retry = 0;
+
+restart_disc:
+		/*
+		 * Discovery is over.
+		 * set port_state to PORT_READY if SLI2.
+		 * cmpl_reg_vpi will set port_state to READY for SLI3.
+		 */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				lpfc_issue_reg_vpi(phba, vport);
+			else  {
+				lpfc_issue_clear_la(phba, vport);
+				vport->port_state = LPFC_VPORT_READY;
+			}
+		}
+
+		/* Setup and issue mailbox INITIALIZE LINK command */
+		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!initlinkmbox) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					 "0206 Device Discovery "
+					 "completion error\n");
+			phba->link_state = LPFC_HBA_ERROR;
+			break;
+		}
+
+		lpfc_linkdown(phba);
+		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
+			       phba->cfg_link_speed);
+		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+		initlinkmbox->vport = vport;
+		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
+		lpfc_set_loopback_flag(phba);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(initlinkmbox, phba->mbox_mem_pool);
+
+		break;
+
+	case LPFC_DISC_AUTH:
+	/* Node Authentication timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0227 Node Authentication timeout\n");
+		lpfc_disc_flush_list(vport);
+
+		/*
+		 * set port_state to PORT_READY if SLI2.
+		 * cmpl_reg_vpi will set port_state to READY for SLI3.
+		 */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				lpfc_issue_reg_vpi(phba, vport);
+			else  {	/* NPIV Not enabled */
+				lpfc_issue_clear_la(phba, vport);
+				vport->port_state = LPFC_VPORT_READY;
+			}
+		}
+		break;
+
+	case LPFC_VPORT_READY:
+		if (vport->fc_flag & FC_RSCN_MODE) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					 "0231 RSCN timeout Data: x%x "
+					 "x%x\n",
+					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+			/* Cleanup any outstanding ELS commands */
+			lpfc_els_flush_cmd(vport);
+
+			lpfc_els_flush_rscn(vport);
+			lpfc_disc_flush_list(vport);
+		}
+		break;
+
+	default:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0273 Unexpected discovery timeout, "
+				 "vport State x%x\n", vport->port_state);
+		break;
+	}
+
+	switch (phba->link_state) {
+	case LPFC_CLEAR_LA:
+				/* CLEAR LA timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0228 CLEAR LA timeout\n");
+		clrlaerr = 1;
+		break;
+
+	case LPFC_LINK_UP:
+		lpfc_issue_clear_la(phba, vport);
+		/* Drop thru */
+	case LPFC_LINK_UNKNOWN:
+	case LPFC_WARM_START:
+	case LPFC_INIT_START:
+	case LPFC_INIT_MBX_CMDS:
+	case LPFC_LINK_DOWN:
+	case LPFC_HBA_ERROR:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0230 Unexpected timeout, hba link "
+				 "state x%x\n", phba->link_state);
+		clrlaerr = 1;
+		break;
+
+	case LPFC_HBA_READY:
+		break;
+	}
+
+	if (clrlaerr) {
+		lpfc_disc_flush_list(vport);
+		if (phba->sli_rev != LPFC_SLI_REV4) {
+			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
+				~LPFC_STOP_IOCB_EVENT;
+			psli->sli3_ring[LPFC_FCP_RING].flag &=
+				~LPFC_STOP_IOCB_EVENT;
+		}
+		vport->port_state = LPFC_VPORT_READY;
+	}
+	return;
+}
+
+/*
+ * This routine handles processing an FDMI REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_vport    *vport = pmb->vport;
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	ndlp->nlp_type |= NLP_FABRIC;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
+			 kref_read(&ndlp->kref),
+			 ndlp->nlp_usg_map, ndlp);
+	/*
+	 * Start issuing Fabric-Device Management Interface (FDMI) command to
+	 * 0xfffffa (FDMI well known port).
+	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
+	 * DPRT -> RPRT (vports)
+	 */
+	if (vport->port_type == LPFC_PHYSICAL_PORT)
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+	else
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+
+	/* decrement the node reference count held for this callback
+	 * function.
+	 */
+	lpfc_nlp_put(ndlp);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return;
+}
+
+static int
+lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
+{
+	uint16_t *rpi = param;
+
+	/* check for active node */
+	if (!NLP_CHK_NODE_ACT(ndlp))
+		return 0;
+
+	return ndlp->nlp_rpi == *rpi;
+}
+
+static int
+lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
+{
+	return memcmp(&ndlp->nlp_portname, param,
+		      sizeof(ndlp->nlp_portname)) == 0;
+}
+
+static struct lpfc_nodelist *
+__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
+{
+	struct lpfc_nodelist *ndlp;
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (filter(ndlp, param)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+					 "3185 FIND node filter %p DID "
+					 "Data: x%p x%x x%x\n",
+					 filter, ndlp, ndlp->nlp_DID,
+					 ndlp->nlp_flag);
+			return ndlp;
+		}
+	}
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "3186 FIND node filter %p NOT FOUND.\n", filter);
+	return NULL;
+}
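+
+/*
+ * Editorial sketch (not driver code): __lpfc_find_node() above separates
+ * the list walk from the match test via a node_filter callback, so one
+ * walker serves both the RPI and WWPN lookups that follow. A self-contained
+ * userspace version of the same idea, with illustrative names:
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+#include <string.h>
+#include <stddef.h>
+
+struct node {
+	struct node *next;
+	uint16_t rpi;
+	uint8_t wwpn[8];
+};
+
+typedef int (*node_filter)(struct node *n, void *param);
+
+static int filter_by_rpi(struct node *n, void *param)
+{
+	return n->rpi == *(uint16_t *)param;
+}
+
+static int filter_by_wwpn(struct node *n, void *param)
+{
+	return memcmp(n->wwpn, param, sizeof(n->wwpn)) == 0;
+}
+
+static struct node *find_node(struct node *head, node_filter fn, void *param)
+{
+	struct node *n;
+
+	for (n = head; n; n = n->next)
+		if (fn(n, param))
+			return n;
+	return NULL;		/* no match, as in __lpfc_find_node() */
+}
+#endif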
+
+/*
+ * This routine looks up the ndlp lists for the given RPI. If rpi found it
+ * returns the node list element pointer else return NULL.
+ */
+struct lpfc_nodelist *
+__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
+}
+
+/*
+ * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
+ * returns the node element list pointer else return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp;
+}
+
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the rpi
+ * is found, the routine returns the node element list pointer else
+ * return NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp = __lpfc_findnode_rpi(vport, rpi);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ *    NULL - No vport with the matching @vpi found
+ *    Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+	struct lpfc_vport *vport;
+	unsigned long flags;
+	int i = 0;
+
+	/* The physical ports are always vpi 0 - translation is unnecessary. */
+	if (vpi > 0) {
+		/*
+		 * Translate the physical vpi to the logical vpi.  The
+		 * vport stores the logical vpi.
+		 */
+		for (i = 0; i < phba->max_vpi; i++) {
+			if (vpi == phba->vpi_ids[i])
+				break;
+		}
+
+		if (i >= phba->max_vpi) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					 "2936 Could not find Vport mapped "
+					 "to vpi %d\n", vpi);
+			return NULL;
+		}
+	}
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		if (vport->vpi == i) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return vport;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return NULL;
+}
+
+struct lpfc_nodelist *
+lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
+{
+	struct lpfc_nodelist *ndlp;
+	int rpi = LPFC_RPI_ALLOC_ERROR;
+
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+		rpi = lpfc_sli4_alloc_rpi(vport->phba);
+		if (rpi == LPFC_RPI_ALLOC_ERROR)
+			return NULL;
+	}
+
+	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+	if (!ndlp) {
+		if (vport->phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(vport->phba, rpi);
+		return NULL;
+	}
+
+	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+
+	lpfc_initialize_node(vport, ndlp, did);
+	INIT_LIST_HEAD(&ndlp->nlp_listp);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+		ndlp->nlp_rpi = rpi;
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
+				 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+				 ndlp->nlp_flag,
+				 kref_read(&ndlp->kref),
+				 ndlp->nlp_usg_map, ndlp);
+
+		ndlp->active_rrqs_xri_bitmap =
+				mempool_alloc(vport->phba->active_rrq_pool,
+					      GFP_KERNEL);
+		if (ndlp->active_rrqs_xri_bitmap)
+			memset(ndlp->active_rrqs_xri_bitmap, 0,
+			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
+	}
+
+
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node init:       did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	return ndlp;
+}
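+
+/*
+ * Editorial sketch (not driver code): lpfc_nlp_init() above acquires the
+ * SLI4 RPI first and must give it back if the node allocation fails - a
+ * rollback-on-failure ordering. A minimal standalone version with an
+ * assumed bitmap RPI allocator:
+ */
+#if 0	/* illustrative only */
+#include <stdlib.h>
+
+static unsigned long rpi_bitmap;	/* 64 RPIs; set bit = in use */
+
+static int alloc_rpi(void)
+{
+	int i;
+
+	for (i = 0; i < 64; i++)
+		if (!(rpi_bitmap & (1UL << i))) {
+			rpi_bitmap |= 1UL << i;
+			return i;
+		}
+	return -1;		/* like LPFC_RPI_ALLOC_ERROR */
+}
+
+static void free_rpi(int rpi)
+{
+	rpi_bitmap &= ~(1UL << rpi);
+}
+
+struct node { int rpi; };
+
+static struct node *node_init(void)
+{
+	struct node *n;
+	int rpi = alloc_rpi();
+
+	if (rpi < 0)
+		return NULL;
+	n = calloc(1, sizeof(*n));
+	if (!n) {
+		free_rpi(rpi);	/* undo step 1 before failing */
+		return NULL;
+	}
+	n->rpi = rpi;
+	return n;
+}
+#endif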
+
+/* This routine releases all resources associated with a specific NPort's
+ * ndlp and mempool_free's the nodelist.
+ */
+static void
+lpfc_nlp_release(struct kref *kref)
+{
+	struct lpfc_hba *phba;
+	unsigned long flags;
+	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
+						  kref);
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+		"node release:    did:x%x flg:x%x type:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			"0279 lpfc_nlp_release: ndlp:x%p did %x "
+			"usgmap:x%x refcnt:%d rpi:%x\n",
+			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
+			kref_read(&ndlp->kref), ndlp->nlp_rpi);
+
+	/* remove ndlp from action. */
+	lpfc_nlp_remove(ndlp->vport, ndlp);
+
+	/* clear the ndlp active flag for all release cases */
+	phba = ndlp->phba;
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	NLP_CLR_NODE_ACT(ndlp);
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+
+	/* free ndlp memory for final ndlp release */
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		kfree(ndlp->lat_data);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			mempool_free(ndlp->active_rrqs_xri_bitmap,
+				     ndlp->phba->active_rrq_pool);
+		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
+	}
+}
+
+/* This routine bumps the reference count for a ndlp structure to ensure
+ * that one discovery thread won't free a ndlp while another discovery thread
+ * is using it.
+ */
+struct lpfc_nodelist *
+lpfc_nlp_get(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba;
+	unsigned long flags;
+
+	if (ndlp) {
+		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+			"node get:        did:x%x flg:x%x refcnt:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag,
+			kref_read(&ndlp->kref));
+		/* Check ndlp usage to avoid incrementing the reference
+		 * count of an ndlp that is in the process of being
+		 * released.
+		 */
+		phba = ndlp->phba;
+		spin_lock_irqsave(&phba->ndlp_lock, flags);
+		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				"0276 lpfc_nlp_get: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+			return NULL;
+		} else
+			kref_get(&ndlp->kref);
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	}
+	return ndlp;
+}
+
+/* This routine decrements the reference count for an ndlp structure. If the
+ * count goes to 0, this indicates that the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released;
+ * returning 0 indicates it has not been released yet.
+ */
+int
+lpfc_nlp_put(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba;
+	unsigned long flags;
+
+	if (!ndlp)
+		return 1;
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+	"node put:        did:x%x flg:x%x refcnt:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag,
+		kref_read(&ndlp->kref));
+	phba = ndlp->phba;
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	/* Check the ndlp memory free acknowledge flag to avoid the
+	 * possible race where kref_put is invoked again after a
+	 * previous call has already freed the ndlp memory.
+	 */
+	if (NLP_CHK_FREE_ACK(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				"0274 lpfc_nlp_put: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+		return 1;
+	}
+	/* Check the ndlp inactivate request flag to avoid the possible
+	 * race where kref_put is invoked again while the ndlp is
+	 * already being inactivated.
+	 */
+	if (NLP_CHK_IACT_REQ(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				"0275 lpfc_nlp_put: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				kref_read(&ndlp->kref));
+		return 1;
+	}
+	/* For the last put, mark the ndlp usage flags so that no
+	 * other kref_get or kref_put on the same ndlp can intervene
+	 * once the final kref_put has been invoked on this ndlp.
+	 */
+	if (kref_read(&ndlp->kref) == 1) {
+		/* Indicate ndlp is put to inactive state. */
+		NLP_SET_IACT_REQ(ndlp);
+		/* Acknowledge ndlp memory free has been seen. */
+		if (NLP_CHK_FREE_REQ(ndlp))
+			NLP_SET_FREE_ACK(ndlp);
+	}
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	/* Note that kref_put returns 1 when decrementing a reference
+	 * count that was 1: it invokes the release callback function
+	 * but leaves the reference count at 1 (the final decrement is
+	 * not actually performed). Otherwise, it decrements the
+	 * reference count and returns 0.
+	 */
+	return kref_put(&ndlp->kref, lpfc_nlp_release);
+}
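+
+/*
+ * Editorial sketch (not driver code): the contract described in the comment
+ * above is that the final put runs the release callback and returns 1,
+ * while any earlier put just drops the count and returns 0. A plain-atomics
+ * userspace analogue (not the kernel's kref):
+ */
+#if 0	/* illustrative only */
+#include <stdatomic.h>
+#include <stdio.h>
+
+struct obj {
+	atomic_int refcount;
+};
+
+static void obj_release(struct obj *o)
+{
+	printf("release callback invoked for %p\n", (void *)o);
+}
+
+static int obj_put(struct obj *o, void (*release)(struct obj *))
+{
+	/* fetch_sub returns the old value: 1 means this was the
+	 * final reference, so run the release callback. */
+	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
+		release(o);
+		return 1;
+	}
+	return 0;
+}
+#endif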
+
+/* This routine frees the specified nodelist if it is not in use
+ * by any other discovery thread. It returns 1 if the ndlp has
+ * been freed; a return value of 0 indicates the ndlp has not yet
+ * been released.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+		"node not used:   did:x%x flg:x%x refcnt:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag,
+		kref_read(&ndlp->kref));
+	if (kref_read(&ndlp->kref) == 1)
+		if (lpfc_nlp_put(ndlp))
+			return 1;
+	return 0;
+}
+
+/**
+ * lpfc_fcf_inuse - Check if FCF can be unregistered.
+ * @phba: Pointer to hba context object.
+ *
+ * This function iterates through all FC nodes associated
+ * with all vports to check if any node has an fc_rport
+ * associated with it. If a node has an fc_rport, then the
+ * node is either in the discovered state or its
+ * devloss_timer is pending.
+ */
+static int
+lpfc_fcf_inuse(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i, ret = 0;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+
+	vports = lpfc_create_vport_work_array(phba);
+
+	/* If driver cannot allocate memory, indicate fcf is in use */
+	if (!vports)
+		return 1;
+
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		shost = lpfc_shost_from_vport(vports[i]);
+		spin_lock_irq(shost->host_lock);
+		/*
+		 * If the CVL_RCVD bit is not set, then we have sent the
+		 * FLOGI.
+		 * If dev_loss fires while we are waiting, we do not want
+		 * to unreg the FCF.
+		 */
+		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+			spin_unlock_irq(shost->host_lock);
+			ret =  1;
+			goto out;
+		}
+		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+				ret = 1;
+				spin_unlock_irq(shost->host_lock);
+				goto out;
+			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+				ret = 1;
+				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+						"2624 RPI %x DID %x flag %x "
+						"still logged in\n",
+						ndlp->nlp_rpi, ndlp->nlp_DID,
+						ndlp->nlp_flag);
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+	}
+out:
+	lpfc_destroy_vport_work_array(phba, vports);
+	return ret;
+}
+
+/**
+ * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+void
+lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2555 UNREG_VFI mbxStatus error x%x "
+			"HBA state x%x\n",
+			mboxq->u.mb.mbxStatus, vport->port_state);
+	}
+	spin_lock_irq(shost->host_lock);
+	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
+	spin_unlock_irq(shost->host_lock);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2550 UNREG_FCFI mbxStatus error x%x "
+			"HBA state x%x\n",
+			mboxq->u.mb.mbxStatus, vport->port_state);
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_unregister_fcf_prep - Unregister fcf record preparation
+ * @phba: Pointer to hba context object.
+ *
+ * This function prepares the HBA for unregistering the currently registered
+ * FCF. It unregisters, in order, the RPIs, the VPIs, and the VFIs.
+ */
+int
+lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int i = 0, rc;
+
+	/* Unregister RPIs */
+	if (lpfc_fcf_inuse(phba))
+		lpfc_unreg_hba_rpis(phba);
+
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+	/* Unregister VPIs */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			/* Stop FLOGI/FDISC retries */
+			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+			if (ndlp)
+				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+			lpfc_cleanup_pending_mbox(vports[i]);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vports[i]);
+			lpfc_mbx_unreg_vpi(vports[i]);
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+		if (ndlp)
+			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+		lpfc_cleanup_pending_mbox(phba->pport);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(phba->pport);
+		lpfc_mbx_unreg_vpi(phba->pport);
+		shost = lpfc_shost_from_vport(phba->pport);
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Unregister the physical port VFI */
+	rc = lpfc_issue_unreg_vfi(phba->pport);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
+ * @phba: Pointer to hba context object.
+ *
+ * This function issues a synchronous unregister FCF mailbox command to the
+ * HBA to unregister the currently registered FCF record. The driver does
+ * not reset the driver FCF usage state flags.
+ *
+ * Return 0 if successfully issued, non-zero otherwise.
+ */
+int
+lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2551 UNREG_FCFI mbox allocation failed"
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2552 Unregister FCFI command failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
+ * @phba: Pointer to hba context object.
+ *
+ * This function unregisters the currently registered FCF. It also tries
+ * to find another FCF for discovery by rescanning the HBA FCF table.
+ */
+void
+lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/* Preparation for unregistering fcf */
+	rc = lpfc_unregister_fcf_prep(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2748 Failed to prepare for unregistering "
+				"HBA's FCF record: rc=%d\n", rc);
+		return;
+	}
+
+	/* Now, unregister FCF record and reset HBA FCF state */
+	rc = lpfc_sli4_unregister_fcf(phba);
+	if (rc)
+		return;
+	/* Reset HBA FCF states after successful unregister FCF */
+	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
+
+	/*
+	 * If driver is not unloading, check if there is any other
+	 * FCF record that can be used for discovery.
+	 */
+	if ((phba->pport->load_flag & FC_UNLOADING) ||
+	    (phba->link_state < LPFC_LINK_UP))
+		return;
+
+	/* This is considered as the initial FCF discovery scan */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= FCF_INIT_DISC;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Reset FCF roundrobin bmask for new discovery */
+	lpfc_sli4_clear_fcf_rr_bmask(phba);
+
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2553 lpfc_unregister_unused_fcf failed "
+				"to read FCF record HBA state x%x\n",
+				phba->pport->port_state);
+	}
+}
+
+/**
+ * lpfc_unregister_fcf - Unregister the currently registered fcf record
+ * @phba: Pointer to hba context object.
+ *
+ * This function just unregisters the currently registered FCF. It does not
+ * try to find another FCF for discovery.
+ */
+void
+lpfc_unregister_fcf(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/* Preparation for unregistering fcf */
+	rc = lpfc_unregister_fcf_prep(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2749 Failed to prepare for unregistering "
+				"HBA's FCF record: rc=%d\n", rc);
+		return;
+	}
+
+	/* Now, unregister FCF record and reset HBA FCF state */
+	rc = lpfc_sli4_unregister_fcf(phba);
+	if (rc)
+		return;
+	/* Set proper HBA FCF states after successful unregister FCF */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function checks if there are any connected remote ports for the FCF;
+ * if all the devices are disconnected, this function unregisters the FCFI.
+ * It also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+	/*
+	 * If HBA is not running in FIP mode, if HBA does not support
+	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
+	 * registered, do nothing.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
+	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
+	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
+	    (phba->pport->port_state == LPFC_FLOGI)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	if (lpfc_fcf_inuse(phba))
+		return;
+
+	lpfc_unregister_fcf_rescan(phba);
+}
+
+/**
+ * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCF connection table as in the config
+ *        region.
+ *
+ * This function creates the driver data structures for the FCF connection
+ * record table read from config region 23.
+ */
+static void
+lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
+	uint8_t *buff)
+{
+	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+	struct lpfc_fcf_conn_hdr *conn_hdr;
+	struct lpfc_fcf_conn_rec *conn_rec;
+	uint32_t record_count;
+	int i;
+
+	/* Free the current connect table */
+	list_for_each_entry_safe(conn_entry, next_conn_entry,
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
+		kfree(conn_entry);
+	}
+
+	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
+	record_count = conn_hdr->length * sizeof(uint32_t)/
+		sizeof(struct lpfc_fcf_conn_rec);
+
+	conn_rec = (struct lpfc_fcf_conn_rec *)
+		(buff + sizeof(struct lpfc_fcf_conn_hdr));
+
+	for (i = 0; i < record_count; i++) {
+		if (!(conn_rec[i].flags & FCFCNCT_VALID))
+			continue;
+		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
+			GFP_KERNEL);
+		if (!conn_entry) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2566 Failed to allocate connection"
+				" table entry\n");
+			return;
+		}
+
+		memcpy(&conn_entry->conn_rec, &conn_rec[i],
+			sizeof(struct lpfc_fcf_conn_rec));
+		list_add_tail(&conn_entry->list,
+			&phba->fcf_conn_rec_list);
+	}
+
+	if (!list_empty(&phba->fcf_conn_rec_list)) {
+		i = 0;
+		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
+				    list) {
+			conn_rec = &conn_entry->conn_rec;
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"3345 FCF connection list rec[%02d]: "
+					"flags:x%04x, vtag:x%04x, "
+					"fabric_name:x%02x:%02x:%02x:%02x:"
+					"%02x:%02x:%02x:%02x, "
+					"switch_name:x%02x:%02x:%02x:%02x:"
+					"%02x:%02x:%02x:%02x\n", i++,
+					conn_rec->flags, conn_rec->vlan_tag,
+					conn_rec->fabric_name[0],
+					conn_rec->fabric_name[1],
+					conn_rec->fabric_name[2],
+					conn_rec->fabric_name[3],
+					conn_rec->fabric_name[4],
+					conn_rec->fabric_name[5],
+					conn_rec->fabric_name[6],
+					conn_rec->fabric_name[7],
+					conn_rec->switch_name[0],
+					conn_rec->switch_name[1],
+					conn_rec->switch_name[2],
+					conn_rec->switch_name[3],
+					conn_rec->switch_name[4],
+					conn_rec->switch_name[5],
+					conn_rec->switch_name[6],
+					conn_rec->switch_name[7]);
+		}
+	}
+}
+
+/**
+ * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCoE parameter data structure.
+ *
+ *  This function updates the driver data structure with config
+ *  parameters read from config region 23.
+ */
+static void
+lpfc_read_fcoe_param(struct lpfc_hba *phba,
+			uint8_t *buff)
+{
+	struct lpfc_fip_param_hdr *fcoe_param_hdr;
+	struct lpfc_fcoe_params *fcoe_param;
+
+	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
+		buff;
+	fcoe_param = (struct lpfc_fcoe_params *)
+		(buff + sizeof(struct lpfc_fip_param_hdr));
+
+	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
+		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
+		return;
+
+	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
+		phba->valid_vlan = 1;
+		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
+			0xFFF;
+	}
+
+	phba->fc_map[0] = fcoe_param->fc_map[0];
+	phba->fc_map[1] = fcoe_param->fc_map[1];
+	phba->fc_map[2] = fcoe_param->fc_map[2];
+	return;
+}
+
+/**
+ * lpfc_get_rec_conf23 - Get a record type in config region data.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ * @rec_type: Record type to be searched.
+ *
+ * This function searches the config region data to find the beginning
+ * of the record specified by rec_type. If the record is found, this
+ * function returns a pointer to it; otherwise it returns NULL.
+ */
+static uint8_t *
+lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
+{
+	uint32_t offset = 0, rec_length;
+
+	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
+		(size < sizeof(uint32_t)))
+		return NULL;
+
+	rec_length = buff[offset + 1];
+
+	/*
+	 * One TLV record has one word header and number of data words
+	 * specified in the rec_length field of the record header.
+	 */
+	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
+		<= size) {
+		if (buff[offset] == rec_type)
+			return &buff[offset];
+
+		if (buff[offset] == LPFC_REGION23_LAST_REC)
+			return NULL;
+
+		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
+		rec_length = buff[offset + 1];
+	}
+	return NULL;
+}
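+
+/*
+ * Editorial sketch (not driver code): each region-23 TLV record parsed
+ * above has a one-word header - byte 0 is the type, byte 1 the payload
+ * length in 32-bit words - followed by that many data words. A standalone
+ * walker over the same layout, with an assumed LAST-record marker:
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+#include <stddef.h>
+
+#define REC_LAST	0xff	/* stand-in for LPFC_REGION23_LAST_REC */
+
+static uint8_t *find_rec(uint8_t *buf, uint32_t size, uint8_t type)
+{
+	uint32_t off = 0;
+
+	while (off + sizeof(uint32_t) <= size) {
+		uint8_t rec_type = buf[off];
+		uint32_t rec_len = buf[off + 1];	/* in 32-bit words */
+
+		if (rec_type == REC_LAST)
+			return NULL;
+		/* the header word plus rec_len data words must fit */
+		if (off + (rec_len + 1) * sizeof(uint32_t) > size)
+			return NULL;
+		if (rec_type == type)
+			return &buf[off];
+		off += (rec_len + 1) * sizeof(uint32_t);
+	}
+	return NULL;
+}
+#endif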
+
+/**
+ * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
+ * @phba: Pointer to lpfc_hba data structure.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ *
+ * This function parses the FCoE config parameters in config region 23 and
+ * populates the driver data structure with the parameters.
+ */
+void
+lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
+		uint8_t *buff,
+		uint32_t size)
+{
+	uint32_t offset = 0;
+	uint8_t *rec_ptr;
+
+	/*
+	 * If data size is less than 2 words signature and version cannot be
+	 * verified.
+	 */
+	if (size < 2*sizeof(uint32_t))
+		return;
+
+	/* Check the region signature first */
+	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2567 Config region 23 has bad signature\n");
+		return;
+	}
+
+	offset += 4;
+
+	/* Check the data structure version */
+	if (buff[offset] != LPFC_REGION23_VERSION) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2568 Config region 23 has bad version\n");
+		return;
+	}
+	offset += 4;
+
+	/* Read FCoE param record */
+	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+			size - offset, FCOE_PARAM_TYPE);
+	if (rec_ptr)
+		lpfc_read_fcoe_param(phba, rec_ptr);
+
+	/* Read FCF connection table */
+	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+		size - offset, FCOE_CONN_TBL_TYPE);
+	if (rec_ptr)
+		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
+
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hw.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hw.h
new file mode 100644
index 0000000..12e033b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hw.h
@@ -0,0 +1,4269 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define FDMI_DID        0xfffffaU
+#define NameServer_DID  0xfffffcU
+#define SCR_DID         0xfffffdU
+#define Fabric_DID      0xfffffeU
+#define Bcast_DID       0xffffffU
+#define Mask_DID        0xffffffU
+#define CT_DID_MASK     0xffff00U
+#define Fabric_DID_MASK 0xfff000U
+#define WELL_KNOWN_DID_MASK 0xfffff0U
+
+#define PT2PT_LocalID	1
+#define PT2PT_RemoteID	2
+
+#define FF_DEF_EDTOV          2000	/* Default E_D_TOV (2000ms) */
+#define FF_DEF_ALTOV            15	/* Default AL_TIME (15ms) */
+#define FF_DEF_RATOV            10	/* Default RA_TOV (10s) */
+#define FF_DEF_ARBTOV         1900	/* Default ARB_TOV (1900ms) */
+
+#define LPFC_BUF_RING0        64	/* Number of buffers to post to RING
+					   0 */
+
+#define FCELSSIZE             1024	/* maximum ELS transfer size */
+
+#define LPFC_FCP_RING            0	/* ring 0 for FCP initiator commands */
+#define LPFC_EXTRA_RING          1	/* ring 1 for other protocols */
+#define LPFC_ELS_RING            2	/* ring 2 for ELS commands */
+
+#define SLI2_IOCB_CMD_R0_ENTRIES    172	/* SLI-2 FCP command ring entries */
+#define SLI2_IOCB_RSP_R0_ENTRIES    134	/* SLI-2 FCP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES      4	/* SLI-2 extra command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES      4	/* SLI-2 extra response ring entries */
+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36	/* SLI-2 extra FCP cmd ring entries */
+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52	/* SLI-2 extra FCP rsp ring entries */
+#define SLI2_IOCB_CMD_R2_ENTRIES     20	/* SLI-2 ELS command ring entries */
+#define SLI2_IOCB_RSP_R2_ENTRIES     20	/* SLI-2 ELS response ring entries */
+#define SLI2_IOCB_CMD_R3_ENTRIES      0
+#define SLI2_IOCB_RSP_R3_ENTRIES      0
+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+
+#define SLI2_IOCB_CMD_SIZE	32
+#define SLI2_IOCB_RSP_SIZE	32
+#define SLI3_IOCB_CMD_SIZE	128
+#define SLI3_IOCB_RSP_SIZE	64
+
+#define LPFC_UNREG_ALL_RPIS_VPORT	0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS	0xffffffff
+
+/* vendor ID used in SCSI netlink calls */
+#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
+
+#define FW_REV_STR_SIZE	32
+/* Common Transport structures and definitions */
+
+union CtRevisionId {
+	/* Structure is in Big Endian format */
+	struct {
+		uint32_t Revision:8;
+		uint32_t InId:24;
+	} bits;
+	uint32_t word;
+};
+
+union CtCommandResponse {
+	/* Structure is in Big Endian format */
+	struct {
+		uint32_t CmdRsp:16;
+		uint32_t Size:16;
+	} bits;
+	uint32_t word;
+};
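+
+/*
+ * Editorial sketch (not driver code): CtCommandResponse above packs a
+ * 16-bit command code and a 16-bit size into one big-endian word via
+ * bitfields. The same word can be assembled portably with network-order
+ * helpers, as this assumed standalone helper shows:
+ */
+#if 0	/* illustrative only */
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t ct_cmd_word(uint16_t cmd, uint16_t size)
+{
+	return htonl(((uint32_t)cmd << 16) | size);
+}
+
+int main(void)
+{
+	/* 0x0171 is SLI_CTNS_GID_FT; size 0 for the request */
+	uint32_t w = ct_cmd_word(0x0171, 0);
+	const uint8_t *b = (const uint8_t *)&w;
+
+	printf("wire bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
+	return 0;
+}
+#endif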
+
+/* FC4 Feature bits for RFF_ID */
+#define FC4_FEATURE_TARGET	0x1
+#define FC4_FEATURE_INIT	0x2
+#define FC4_FEATURE_NVME_DISC	0x4
+
+struct lpfc_sli_ct_request {
+	/* Structure is in Big Endian format */
+	union CtRevisionId RevisionId;
+	uint8_t FsType;
+	uint8_t FsSubType;
+	uint8_t Options;
+	uint8_t Rsrvd1;
+	union CtCommandResponse CommandResponse;
+	uint8_t Rsrvd2;
+	uint8_t ReasonCode;
+	uint8_t Explanation;
+	uint8_t VendorUnique;
+#define LPFC_CT_PREAMBLE	20	/* Size of CTReq + 4 up to here */
+
+	union {
+		uint32_t PortID;
+		struct gid {
+			uint8_t PortType;	/* for GID_PT requests */
+			uint8_t DomainScope;
+			uint8_t AreaScope;
+			uint8_t Fc4Type;	/* for GID_FT requests */
+		} gid;
+		struct gid_ff {
+			uint8_t Flags;
+			uint8_t DomainScope;
+			uint8_t AreaScope;
+			uint8_t rsvd1;
+			uint8_t rsvd2;
+			uint8_t rsvd3;
+			uint8_t Fc4FBits;
+			uint8_t Fc4Type;
+		} gid_ff;
+		struct rft {
+			uint32_t PortId;	/* For RFT_ID requests */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint32_t rsvd0:16;
+			uint32_t rsvd1:7;
+			uint32_t fcpReg:1;	/* Type 8 */
+			uint32_t rsvd2:2;
+			uint32_t ipReg:1;	/* Type 5 */
+			uint32_t rsvd3:5;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint32_t rsvd0:16;
+			uint32_t fcpReg:1;	/* Type 8 */
+			uint32_t rsvd1:7;
+			uint32_t rsvd3:5;
+			uint32_t ipReg:1;	/* Type 5 */
+			uint32_t rsvd2:2;
+#endif
+
+			uint32_t rsvd[7];
+		} rft;
+		struct rnn {
+			uint32_t PortId;	/* For RNN_ID requests */
+			uint8_t wwnn[8];
+		} rnn;
+		struct rsnn {	/* For RSNN_ID requests */
+			uint8_t wwnn[8];
+			uint8_t len;
+			uint8_t symbname[255];
+		} rsnn;
+		struct da_id { /* For DA_ID requests */
+			uint32_t port_id;
+		} da_id;
+		struct rspn {	/* For RSPN_ID requests */
+			uint32_t PortId;
+			uint8_t len;
+			uint8_t symbname[255];
+		} rspn;
+		struct gff {
+			uint32_t PortId;
+		} gff;
+		struct gff_acc {
+			uint8_t fbits[128];
+		} gff_acc;
+		struct gft {
+			uint32_t PortId;
+		} gft;
+		struct gft_acc {
+			uint32_t fc4_types[8];
+		} gft_acc;
+#define FCP_TYPE_FEATURE_OFFSET 7
+		struct rff {
+			uint32_t PortId;
+			uint8_t reserved[2];
+			uint8_t fbits;
+			uint8_t type_code;     /* type=8 for FCP */
+		} rff;
+	} un;
+};
+
+#define LPFC_MAX_CT_SIZE	(60 * 4096)
+
+#define  SLI_CT_REVISION        1
+#define  GID_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct gid))
+#define  GIDFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct gid_ff))
+#define  GFF_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct gff))
+#define  GFT_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct gft))
+#define  RFT_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rft))
+#define  RFF_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rff))
+#define  RNN_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rnn))
+#define  RSNN_REQUEST_SZ  (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rsnn))
+#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+			  sizeof(struct da_id))
+#define  RSPN_REQUEST_SZ  (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rspn))
+
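+/*
+ * Editorial sketch (not driver code): the request-size macros above add
+ * the fixed preamble (everything up to the union) to the size of the one
+ * union member a command actually uses, so each request sends only the
+ * bytes it needs. A worked standalone example with toy structs:
+ */
+#if 0	/* illustrative only */
+#include <stdio.h>
+#include <stddef.h>
+#include <stdint.h>
+
+struct ct_req {
+	uint32_t preamble[5];		/* 20-byte CT preamble */
+	union {
+		struct gid_eg { uint32_t port_id; } gid;
+		struct rnn_eg { uint32_t port_id; uint8_t wwnn[8]; } rnn;
+	} un;
+};
+
+#define GID_SZ	(offsetof(struct ct_req, un) + sizeof(struct gid_eg))
+#define RNN_SZ	(offsetof(struct ct_req, un) + sizeof(struct rnn_eg))
+
+int main(void)
+{
+	printf("GID request: %zu bytes\n", GID_SZ);	/* 20 + 4 = 24 */
+	printf("RNN request: %zu bytes\n", RNN_SZ);	/* 20 + 12 = 32 */
+	return 0;
+}
+#endif
+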
+/*
+ * FsType Definitions
+ */
+
+#define  SLI_CT_MANAGEMENT_SERVICE        0xFA
+#define  SLI_CT_TIME_SERVICE              0xFB
+#define  SLI_CT_DIRECTORY_SERVICE         0xFC
+#define  SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
+
+/*
+ * Directory Service Subtypes
+ */
+
+#define  SLI_CT_DIRECTORY_NAME_SERVER     0x02
+
+/*
+ * Response Codes
+ */
+
+#define  SLI_CT_RESPONSE_FS_RJT           0x8001
+#define  SLI_CT_RESPONSE_FS_ACC           0x8002
+
+/*
+ * Reason Codes
+ */
+
+#define  SLI_CT_NO_ADDITIONAL_EXPL	  0x0
+#define  SLI_CT_INVALID_COMMAND           0x01
+#define  SLI_CT_INVALID_VERSION           0x02
+#define  SLI_CT_LOGICAL_ERROR             0x03
+#define  SLI_CT_INVALID_IU_SIZE           0x04
+#define  SLI_CT_LOGICAL_BUSY              0x05
+#define  SLI_CT_PROTOCOL_ERROR            0x07
+#define  SLI_CT_UNABLE_TO_PERFORM_REQ     0x09
+#define  SLI_CT_REQ_NOT_SUPPORTED         0x0b
+#define  SLI_CT_HBA_INFO_NOT_REGISTERED	  0x10
+#define  SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE  0x11
+#define  SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN      0x12
+#define  SLI_CT_HBA_ATTR_NOT_PRESENT	  0x13
+#define  SLI_CT_PORT_INFO_NOT_REGISTERED  0x20
+#define  SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
+#define  SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN     0x22
+#define  SLI_CT_VENDOR_UNIQUE             0xff
+
+/*
+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
+ */
+
+#define  SLI_CT_NO_PORT_ID                0x01
+#define  SLI_CT_NO_PORT_NAME              0x02
+#define  SLI_CT_NO_NODE_NAME              0x03
+#define  SLI_CT_NO_CLASS_OF_SERVICE       0x04
+#define  SLI_CT_NO_IP_ADDRESS             0x05
+#define  SLI_CT_NO_IPA                    0x06
+#define  SLI_CT_NO_FC4_TYPES              0x07
+#define  SLI_CT_NO_SYMBOLIC_PORT_NAME     0x08
+#define  SLI_CT_NO_SYMBOLIC_NODE_NAME     0x09
+#define  SLI_CT_NO_PORT_TYPE              0x0A
+#define  SLI_CT_ACCESS_DENIED             0x10
+#define  SLI_CT_INVALID_PORT_ID           0x11
+#define  SLI_CT_DATABASE_EMPTY            0x12
+
+/*
+ * Name Server Command Codes
+ */
+
+#define  SLI_CTNS_GA_NXT      0x0100
+#define  SLI_CTNS_GPN_ID      0x0112
+#define  SLI_CTNS_GNN_ID      0x0113
+#define  SLI_CTNS_GCS_ID      0x0114
+#define  SLI_CTNS_GFT_ID      0x0117
+#define  SLI_CTNS_GSPN_ID     0x0118
+#define  SLI_CTNS_GPT_ID      0x011A
+#define  SLI_CTNS_GFF_ID      0x011F
+#define  SLI_CTNS_GID_PN      0x0121
+#define  SLI_CTNS_GID_NN      0x0131
+#define  SLI_CTNS_GIP_NN      0x0135
+#define  SLI_CTNS_GIPA_NN     0x0136
+#define  SLI_CTNS_GSNN_NN     0x0139
+#define  SLI_CTNS_GNN_IP      0x0153
+#define  SLI_CTNS_GIPA_IP     0x0156
+#define  SLI_CTNS_GID_FT      0x0171
+#define  SLI_CTNS_GID_FF      0x01F1
+#define  SLI_CTNS_GID_PT      0x01A1
+#define  SLI_CTNS_RPN_ID      0x0212
+#define  SLI_CTNS_RNN_ID      0x0213
+#define  SLI_CTNS_RCS_ID      0x0214
+#define  SLI_CTNS_RFT_ID      0x0217
+#define  SLI_CTNS_RSPN_ID     0x0218
+#define  SLI_CTNS_RPT_ID      0x021A
+#define  SLI_CTNS_RFF_ID      0x021F
+#define  SLI_CTNS_RIP_NN      0x0235
+#define  SLI_CTNS_RIPA_NN     0x0236
+#define  SLI_CTNS_RSNN_NN     0x0239
+#define  SLI_CTNS_DA_ID       0x0300
+
+/*
+ * Port Types
+ */
+
+#define SLI_CTPT_N_PORT		0x01
+#define SLI_CTPT_NL_PORT	0x02
+#define SLI_CTPT_FNL_PORT	0x03
+#define SLI_CTPT_IP		0x04
+#define SLI_CTPT_FCP		0x08
+#define SLI_CTPT_NVME		0x28
+#define SLI_CTPT_NX_PORT	0x7F
+#define SLI_CTPT_F_PORT		0x81
+#define SLI_CTPT_FL_PORT	0x82
+#define SLI_CTPT_E_PORT		0x84
+
+#define SLI_CT_LAST_ENTRY     0x80000000
+
+/* Fibre Channel Service Parameter definitions */
+
+#define FC_PH_4_0   6		/* FC-PH version 4.0 */
+#define FC_PH_4_1   7		/* FC-PH version 4.1 */
+#define FC_PH_4_2   8		/* FC-PH version 4.2 */
+#define FC_PH_4_3   9		/* FC-PH version 4.3 */
+
+#define FC_PH_LOW   8		/* Lowest supported FC-PH version */
+#define FC_PH_HIGH  9		/* Highest supported FC-PH version */
+#define FC_PH3   0x20		/* FC-PH-3 version */
+
+#define FF_FRAME_SIZE     2048
+
+struct lpfc_name {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t nameType:4;	/* FC Word 0, bit 28:31 */
+			uint8_t IEEEextMsn:4;	/* FC Word 0, bit 24:27, bit
+						   8:11 of IEEE ext */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint8_t IEEEextMsn:4;	/* FC Word 0, bit 24:27, bit
+						   8:11 of IEEE ext */
+			uint8_t nameType:4;	/* FC Word 0, bit 28:31 */
+#endif
+
+#define NAME_IEEE           0x1	/* IEEE name - nameType */
+#define NAME_IEEE_EXT       0x2	/* IEEE extended name */
+#define NAME_FC_TYPE        0x3	/* FC native name type */
+#define NAME_IP_TYPE        0x4	/* IP address */
+#define NAME_CCITT_TYPE     0xC
+#define NAME_CCITT_GR_TYPE  0xE
+			uint8_t IEEEextLsb;	/* FC Word 0, bit 16:23, IEEE
+						   extended Lsb */
+			uint8_t IEEE[6];	/* FC IEEE address */
+		} s;
+		uint8_t wwn[8];
+		uint64_t name;
+	} u;
+};
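+
+/*
+ * Editorial sketch (not driver code): the wwn[8] view of struct lpfc_name
+ * above is the raw 8-byte World Wide Name; it is conventionally displayed
+ * as colon-separated hex. A generic standalone formatter (not a driver
+ * API):
+ */
+#if 0	/* illustrative only */
+#include <stdio.h>
+#include <stdint.h>
+
+static void format_wwn(const uint8_t wwn[8], char out[24])
+{
+	/* 16 hex digits + 7 colons + NUL = 24 bytes */
+	snprintf(out, 24, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+		 wwn[0], wwn[1], wwn[2], wwn[3],
+		 wwn[4], wwn[5], wwn[6], wwn[7]);
+}
+
+int main(void)
+{
+	const uint8_t wwn[8] = {
+		0x10, 0x00, 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56
+	};
+	char buf[24];
+
+	format_wwn(wwn, buf);
+	printf("%s\n", buf);	/* 10:00:00:00:c9:12:34:56 */
+	return 0;
+}
+#endif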
+
+struct csp {
+	uint8_t fcphHigh;	/* FC Word 0, byte 0 */
+	uint8_t fcphLow;
+	uint8_t bbCreditMsb;
+	uint8_t bbCreditLsb;	/* FC Word 0, byte 3 */
+
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
+/*
+ * Word 1 Bit 29 in common service parameter is overloaded.
+ * Word 1 Bit 29 in FLOGI response is multiple NPort assignment
+ * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level
+ */
+#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
+	uint16_t randomOffset:1;	/* FC Word 1, bit 30 */
+	uint16_t response_multiple_NPort:1;	/* FC Word 1, bit 29 */
+	uint16_t fPort:1;	/* FC Word 1, bit 28 */
+	uint16_t altBbCredit:1;	/* FC Word 1, bit 27 */
+	uint16_t edtovResolution:1;	/* FC Word 1, bit 26 */
+	uint16_t multicast:1;	/* FC Word 1, bit 25 */
+	uint16_t broadcast:1;	/* FC Word 1, bit 24 */
+
+	uint16_t huntgroup:1;	/* FC Word 1, bit 23 */
+	uint16_t simplex:1;	/* FC Word 1, bit 22 */
+	uint16_t word1Reserved1:3;	/* FC Word 1, bit 21:19 */
+	uint16_t dhd:1;		/* FC Word 1, bit 18 */
+	uint16_t contIncSeqCnt:1;	/* FC Word 1, bit 17 */
+	uint16_t payloadlength:1;	/* FC Word 1, bit 16 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t broadcast:1;	/* FC Word 1, bit 24 */
+	uint16_t multicast:1;	/* FC Word 1, bit 25 */
+	uint16_t edtovResolution:1;	/* FC Word 1, bit 26 */
+	uint16_t altBbCredit:1;	/* FC Word 1, bit 27 */
+	uint16_t fPort:1;	/* FC Word 1, bit 28 */
+	uint16_t response_multiple_NPort:1;	/* FC Word 1, bit 29 */
+	uint16_t randomOffset:1;	/* FC Word 1, bit 30 */
+	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
+
+	uint16_t payloadlength:1;	/* FC Word 1, bit 16 */
+	uint16_t contIncSeqCnt:1;	/* FC Word 1, bit 17 */
+	uint16_t dhd:1;		/* FC Word 1, bit 18 */
+	uint16_t word1Reserved1:3;	/* FC Word 1, bit 21:19 */
+	uint16_t simplex:1;	/* FC Word 1, bit 22 */
+	uint16_t huntgroup:1;	/* FC Word 1, bit 23 */
+#endif
+
+	uint8_t bbRcvSizeMsb;	/* Upper nibble is reserved */
+	uint8_t bbRcvSizeLsb;	/* FC Word 1, byte 3 */
+	union {
+		struct {
+			uint8_t word2Reserved1;	/* FC Word 2 byte 0 */
+
+			uint8_t totalConcurrSeq;	/* FC Word 2 byte 1 */
+			uint8_t roByCategoryMsb;	/* FC Word 2 byte 2 */
+
+			uint8_t roByCategoryLsb;	/* FC Word 2 byte 3 */
+		} nPort;
+		uint32_t r_a_tov;	/* R_A_TOV must be in B.E. format */
+	} w2;
+
+	uint32_t e_d_tov;	/* E_D_TOV must be in B.E. format */
+};
+
+struct class_parms {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t classValid:1;	/* FC Word 0, bit 31 */
+	uint8_t intermix:1;	/* FC Word 0, bit 30 */
+	uint8_t stackedXparent:1;	/* FC Word 0, bit 29 */
+	uint8_t stackedLockDown:1;	/* FC Word 0, bit 28 */
+	uint8_t seqDelivery:1;	/* FC Word 0, bit 27 */
+	uint8_t word0Reserved1:3;	/* FC Word 0, bit 24:26 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t word0Reserved1:3;	/* FC Word 0, bit 24:26 */
+	uint8_t seqDelivery:1;	/* FC Word 0, bit 27 */
+	uint8_t stackedLockDown:1;	/* FC Word 0, bit 28 */
+	uint8_t stackedXparent:1;	/* FC Word 0, bit 29 */
+	uint8_t intermix:1;	/* FC Word 0, bit 30 */
+	uint8_t classValid:1;	/* FC Word 0, bit 31 */
+
+#endif
+
+	uint8_t word0Reserved2;	/* FC Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t iCtlXidReAssgn:2;	/* FC Word 0, Bit 14:15 */
+	uint8_t iCtlInitialPa:2;	/* FC Word 0, bit 12:13 */
+	uint8_t iCtlAck0capable:1;	/* FC Word 0, bit 11 */
+	uint8_t iCtlAckNcapable:1;	/* FC Word 0, bit 10 */
+	uint8_t word0Reserved3:2;	/* FC Word 0, bit  8: 9 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t word0Reserved3:2;	/* FC Word 0, bit  8: 9 */
+	uint8_t iCtlAckNcapable:1;	/* FC Word 0, bit 10 */
+	uint8_t iCtlAck0capable:1;	/* FC Word 0, bit 11 */
+	uint8_t iCtlInitialPa:2;	/* FC Word 0, bit 12:13 */
+	uint8_t iCtlXidReAssgn:2;	/* FC Word 0, Bit 14:15 */
+#endif
+
+	uint8_t word0Reserved4;	/* FC Word 0, bit  0: 7 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t rCtlAck0capable:1;	/* FC Word 1, bit 31 */
+	uint8_t rCtlAckNcapable:1;	/* FC Word 1, bit 30 */
+	uint8_t rCtlXidInterlck:1;	/* FC Word 1, bit 29 */
+	uint8_t rCtlErrorPolicy:2;	/* FC Word 1, bit 27:28 */
+	uint8_t word1Reserved1:1;	/* FC Word 1, bit 26 */
+	uint8_t rCtlCatPerSeq:2;	/* FC Word 1, bit 24:25 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t rCtlCatPerSeq:2;	/* FC Word 1, bit 24:25 */
+	uint8_t word1Reserved1:1;	/* FC Word 1, bit 26 */
+	uint8_t rCtlErrorPolicy:2;	/* FC Word 1, bit 27:28 */
+	uint8_t rCtlXidInterlck:1;	/* FC Word 1, bit 29 */
+	uint8_t rCtlAckNcapable:1;	/* FC Word 1, bit 30 */
+	uint8_t rCtlAck0capable:1;	/* FC Word 1, bit 31 */
+#endif
+
+	uint8_t word1Reserved2;	/* FC Word 1, bit 16:23 */
+	uint8_t rcvDataSizeMsb;	/* FC Word 1, bit  8:15 */
+	uint8_t rcvDataSizeLsb;	/* FC Word 1, bit  0: 7 */
+
+	uint8_t concurrentSeqMsb;	/* FC Word 2, bit 24:31 */
+	uint8_t concurrentSeqLsb;	/* FC Word 2, bit 16:23 */
+	uint8_t EeCreditSeqMsb;	/* FC Word 2, bit  8:15 */
+	uint8_t EeCreditSeqLsb;	/* FC Word 2, bit  0: 7 */
+
+	uint8_t openSeqPerXchgMsb;	/* FC Word 3, bit 24:31 */
+	uint8_t openSeqPerXchgLsb;	/* FC Word 3, bit 16:23 */
+	uint8_t word3Reserved1;	/* Fc Word 3, bit  8:15 */
+	uint8_t word3Reserved2;	/* Fc Word 3, bit  0: 7 */
+};
+
+#define FAPWWN_KEY_VENDOR	0x42524344 /*valid vendor version fawwpn key*/
+
+struct serv_parm {	/* Structure is in Big Endian format */
+	struct csp cmn;
+	struct lpfc_name portName;
+	struct lpfc_name nodeName;
+	struct class_parms cls1;
+	struct class_parms cls2;
+	struct class_parms cls3;
+	struct class_parms cls4;
+	union {
+		uint8_t vendorVersion[16];
+		struct {
+			uint32_t vid;
+#define LPFC_VV_EMLX_ID	0x454d4c58	/* EMLX */
+			uint32_t flags;
+#define LPFC_VV_SUPPRESS_RSP	1
+		} vv;
+	} un;
+};
+
+/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+	 uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT		24
+#define fc_vft_hdr_r_ctl_MASK		0xFF
+#define fc_vft_hdr_r_ctl_WORD		word0
+#define fc_vft_hdr_ver_SHIFT		22
+#define fc_vft_hdr_ver_MASK		0x3
+#define fc_vft_hdr_ver_WORD		word0
+#define fc_vft_hdr_type_SHIFT		18
+#define fc_vft_hdr_type_MASK		0xF
+#define fc_vft_hdr_type_WORD		word0
+#define fc_vft_hdr_e_SHIFT		16
+#define fc_vft_hdr_e_MASK		0x1
+#define fc_vft_hdr_e_WORD		word0
+#define fc_vft_hdr_priority_SHIFT	13
+#define fc_vft_hdr_priority_MASK	0x7
+#define fc_vft_hdr_priority_WORD	word0
+#define fc_vft_hdr_vf_id_SHIFT		1
+#define fc_vft_hdr_vf_id_MASK		0xFFF
+#define fc_vft_hdr_vf_id_WORD		word0
+	uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT		24
+#define fc_vft_hdr_hopct_MASK		0xFF
+#define fc_vft_hdr_hopct_WORD		word1
+};
+
+/*
+ *  Extended Link Service LS_COMMAND codes (Payload Word 0)
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define ELS_CMD_MASK      0xffff0000
+#define ELS_RSP_MASK      0xff000000
+#define ELS_CMD_LS_RJT    0x01000000
+#define ELS_CMD_ACC       0x02000000
+#define ELS_CMD_PLOGI     0x03000000
+#define ELS_CMD_FLOGI     0x04000000
+#define ELS_CMD_LOGO      0x05000000
+#define ELS_CMD_ABTX      0x06000000
+#define ELS_CMD_RCS       0x07000000
+#define ELS_CMD_RES       0x08000000
+#define ELS_CMD_RSS       0x09000000
+#define ELS_CMD_RSI       0x0A000000
+#define ELS_CMD_ESTS      0x0B000000
+#define ELS_CMD_ESTC      0x0C000000
+#define ELS_CMD_ADVC      0x0D000000
+#define ELS_CMD_RTV       0x0E000000
+#define ELS_CMD_RLS       0x0F000000
+#define ELS_CMD_ECHO      0x10000000
+#define ELS_CMD_TEST      0x11000000
+#define ELS_CMD_RRQ       0x12000000
+#define ELS_CMD_REC       0x13000000
+#define ELS_CMD_RDP       0x18000000
+#define ELS_CMD_PRLI      0x20100014
+#define ELS_CMD_NVMEPRLI  0x20140018
+#define ELS_CMD_PRLO      0x21100014
+#define ELS_CMD_PRLO_ACC  0x02100014
+#define ELS_CMD_PDISC     0x50000000
+#define ELS_CMD_FDISC     0x51000000
+#define ELS_CMD_ADISC     0x52000000
+#define ELS_CMD_FARP      0x54000000
+#define ELS_CMD_FARPR     0x55000000
+#define ELS_CMD_RPS       0x56000000
+#define ELS_CMD_RPL       0x57000000
+#define ELS_CMD_FAN       0x60000000
+#define ELS_CMD_RSCN      0x61040000
+#define ELS_CMD_SCR       0x62000000
+#define ELS_CMD_RNID      0x78000000
+#define ELS_CMD_LIRR      0x7A000000
+#define ELS_CMD_LCB	  0x81000000
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+#define ELS_CMD_MASK      0xffff
+#define ELS_RSP_MASK      0xff
+#define ELS_CMD_LS_RJT    0x01
+#define ELS_CMD_ACC       0x02
+#define ELS_CMD_PLOGI     0x03
+#define ELS_CMD_FLOGI     0x04
+#define ELS_CMD_LOGO      0x05
+#define ELS_CMD_ABTX      0x06
+#define ELS_CMD_RCS       0x07
+#define ELS_CMD_RES       0x08
+#define ELS_CMD_RSS       0x09
+#define ELS_CMD_RSI       0x0A
+#define ELS_CMD_ESTS      0x0B
+#define ELS_CMD_ESTC      0x0C
+#define ELS_CMD_ADVC      0x0D
+#define ELS_CMD_RTV       0x0E
+#define ELS_CMD_RLS       0x0F
+#define ELS_CMD_ECHO      0x10
+#define ELS_CMD_TEST      0x11
+#define ELS_CMD_RRQ       0x12
+#define ELS_CMD_REC       0x13
+#define ELS_CMD_RDP	  0x18
+#define ELS_CMD_PRLI      0x14001020
+#define ELS_CMD_NVMEPRLI  0x18001420
+#define ELS_CMD_PRLO      0x14001021
+#define ELS_CMD_PRLO_ACC  0x14001002
+#define ELS_CMD_PDISC     0x50
+#define ELS_CMD_FDISC     0x51
+#define ELS_CMD_ADISC     0x52
+#define ELS_CMD_FARP      0x54
+#define ELS_CMD_FARPR     0x55
+#define ELS_CMD_RPS       0x56
+#define ELS_CMD_RPL       0x57
+#define ELS_CMD_FAN       0x60
+#define ELS_CMD_RSCN      0x0461
+#define ELS_CMD_SCR       0x62
+#define ELS_CMD_RNID      0x78
+#define ELS_CMD_LIRR      0x7A
+#define ELS_CMD_LCB	  0x81
+#endif
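+
+/*
+ * Editorial sketch (not part of the original source): the two constant
+ * sets above let a raw (wire-order) payload word be compared without a
+ * byte swap.  The ELS command sits in byte 0 of Word 0 on the wire, so
+ * on a little-endian host it lands in the low byte of the 32-bit load:
+ *
+ *	uint32_t *pcmd = payload;	first word of the ELS payload
+ *	if ((pcmd[0] & ELS_CMD_MASK) == ELS_CMD_PLOGI)
+ *		...			matches 0x03 under either layout
+ *
+ * "payload" is a hypothetical pointer to the received frame data.
+ */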
+
+/*
+ *  LS_RJT Payload Definition
+ */
+
+struct ls_rjt {	/* Structure is in Big Endian format */
+	union {
+		uint32_t lsRjtError;
+		struct {
+			uint8_t lsRjtRsvd0;	/* FC Word 0, bit 24:31 */
+
+			uint8_t lsRjtRsnCode;	/* FC Word 0, bit 16:23 */
+			/* LS_RJT reason codes */
+#define LSRJT_INVALID_CMD     0x01
+#define LSRJT_LOGICAL_ERR     0x03
+#define LSRJT_LOGICAL_BSY     0x05
+#define LSRJT_PROTOCOL_ERR    0x07
+#define LSRJT_UNABLE_TPC      0x09	/* Unable to perform command */
+#define LSRJT_CMD_UNSUPPORTED 0x0B
+#define LSRJT_VENDOR_UNIQUE   0xFF	/* See Byte 3 */
+
+			uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
+			/* LS_RJT reason explanation */
+#define LSEXP_NOTHING_MORE      0x00
+#define LSEXP_SPARM_OPTIONS     0x01
+#define LSEXP_SPARM_ICTL        0x03
+#define LSEXP_SPARM_RCTL        0x05
+#define LSEXP_SPARM_RCV_SIZE    0x07
+#define LSEXP_SPARM_CONCUR_SEQ  0x09
+#define LSEXP_SPARM_CREDIT      0x0B
+#define LSEXP_INVALID_PNAME     0x0D
+#define LSEXP_INVALID_NNAME     0x0E
+#define LSEXP_INVALID_CSP       0x0F
+#define LSEXP_INVALID_ASSOC_HDR 0x11
+#define LSEXP_ASSOC_HDR_REQ     0x13
+#define LSEXP_INVALID_O_SID     0x15
+#define LSEXP_INVALID_OX_RX     0x17
+#define LSEXP_CMD_IN_PROGRESS   0x19
+#define LSEXP_PORT_LOGIN_REQ    0x1E
+#define LSEXP_INVALID_NPORT_ID  0x1F
+#define LSEXP_INVALID_SEQ_ID    0x21
+#define LSEXP_INVALID_XCHG      0x23
+#define LSEXP_INACTIVE_XCHG     0x25
+#define LSEXP_RQ_REQUIRED       0x27
+#define LSEXP_OUT_OF_RESOURCE   0x29
+#define LSEXP_CANT_GIVE_DATA    0x2A
+#define LSEXP_REQ_UNSUPPORTED   0x2C
+			uint8_t vendorUnique;	/* FC Word 0, bit  0: 7 */
+		} b;
+	} un;
+};
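+
+/*
+ * Editorial sketch (not part of the original source): since every
+ * member of the byte-wise view is a single octet, an LS_RJT status
+ * word in wire byte order can be decoded directly:
+ *
+ *	struct ls_rjt *stat = (struct ls_rjt *)rjt_word;
+ *	if (stat->un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC &&
+ *	    stat->un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)
+ *		...retry the request later...
+ *
+ * "rjt_word" is a hypothetical pointer to the reject status word.
+ */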
+
+/*
+ *  N_Port Logout (FLOGO/PLOGO Request) Payload Definition
+ */
+
+typedef struct _LOGO {		/* Structure is in Big Endian format */
+	union {
+		uint32_t nPortId32;	/* Access nPortId as a word */
+		struct {
+			uint8_t word1Reserved1;	/* FC Word 1, bit 24:31 */
+			uint8_t nPortIdByte0;	/* N_port  ID bit 16:23 */
+			uint8_t nPortIdByte1;	/* N_port  ID bit  8:15 */
+			uint8_t nPortIdByte2;	/* N_port  ID bit  0: 7 */
+		} b;
+	} un;
+	struct lpfc_name portName;	/* N_port name field */
+} LOGO;
+
+/*
+ *  FCP Login (PRLI Request / ACC) Payload Definition
+ */
+
+#define PRLX_PAGE_LEN   0x10
+#define TPRLO_PAGE_LEN  0x14
+
+typedef struct _PRLI {		/* Structure is in Big Endian format */
+	uint8_t prliType;	/* FC Parm Word 0, bit 24:31 */
+
+#define PRLI_FCP_TYPE 0x08
+#define PRLI_NVME_TYPE 0x28
+	uint8_t word0Reserved1;	/* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t estabImagePair:1;	/* FC Parm Word 0, bit 13 */
+
+	/*    ACC = imagePairEstablished */
+	uint8_t word0Reserved2:1;	/* FC Parm Word 0, bit 12 */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+	uint8_t word0Reserved2:1;	/* FC Parm Word 0, bit 12 */
+	uint8_t estabImagePair:1;	/* FC Parm Word 0, bit 13 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+	/*    ACC = imagePairEstablished */
+#endif
+
+#define PRLI_REQ_EXECUTED     0x1	/* acceptRspCode */
+#define PRLI_NO_RESOURCES     0x2
+#define PRLI_INIT_INCOMPLETE  0x3
+#define PRLI_NO_SUCH_PA       0x4
+#define PRLI_PREDEF_CONFIG    0x5
+#define PRLI_PARTIAL_SUCCESS  0x6
+#define PRLI_INVALID_PAGE_CNT 0x7
+	uint8_t word0Reserved3;	/* FC Parm Word 0, bit 0:7 */
+
+	uint32_t origProcAssoc;	/* FC Parm Word 1, bit 0:31 */
+
+	uint32_t respProcAssoc;	/* FC Parm Word 2, bit 0:31 */
+
+	uint8_t word3Reserved1;	/* FC Parm Word 3, bit 24:31 */
+	uint8_t word3Reserved2;	/* FC Parm Word 3, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t Word3bit15Resved:1;	/* FC Parm Word 3, bit 15 */
+	uint16_t Word3bit14Resved:1;	/* FC Parm Word 3, bit 14 */
+	uint16_t Word3bit13Resved:1;	/* FC Parm Word 3, bit 13 */
+	uint16_t Word3bit12Resved:1;	/* FC Parm Word 3, bit 12 */
+	uint16_t Word3bit11Resved:1;	/* FC Parm Word 3, bit 11 */
+	uint16_t Word3bit10Resved:1;	/* FC Parm Word 3, bit 10 */
+	uint16_t TaskRetryIdReq:1;	/* FC Parm Word 3, bit  9 */
+	uint16_t Retry:1;	/* FC Parm Word 3, bit  8 */
+	uint16_t ConfmComplAllowed:1;	/* FC Parm Word 3, bit  7 */
+	uint16_t dataOverLay:1;	/* FC Parm Word 3, bit  6 */
+	uint16_t initiatorFunc:1;	/* FC Parm Word 3, bit  5 */
+	uint16_t targetFunc:1;	/* FC Parm Word 3, bit  4 */
+	uint16_t cmdDataMixEna:1;	/* FC Parm Word 3, bit  3 */
+	uint16_t dataRspMixEna:1;	/* FC Parm Word 3, bit  2 */
+	uint16_t readXferRdyDis:1;	/* FC Parm Word 3, bit  1 */
+	uint16_t writeXferRdyDis:1;	/* FC Parm Word 3, bit  0 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t Retry:1;	/* FC Parm Word 3, bit  8 */
+	uint16_t TaskRetryIdReq:1;	/* FC Parm Word 3, bit  9 */
+	uint16_t Word3bit10Resved:1;	/* FC Parm Word 3, bit 10 */
+	uint16_t Word3bit11Resved:1;	/* FC Parm Word 3, bit 11 */
+	uint16_t Word3bit12Resved:1;	/* FC Parm Word 3, bit 12 */
+	uint16_t Word3bit13Resved:1;	/* FC Parm Word 3, bit 13 */
+	uint16_t Word3bit14Resved:1;	/* FC Parm Word 3, bit 14 */
+	uint16_t Word3bit15Resved:1;	/* FC Parm Word 3, bit 15 */
+	uint16_t writeXferRdyDis:1;	/* FC Parm Word 3, bit  0 */
+	uint16_t readXferRdyDis:1;	/* FC Parm Word 3, bit  1 */
+	uint16_t dataRspMixEna:1;	/* FC Parm Word 3, bit  2 */
+	uint16_t cmdDataMixEna:1;	/* FC Parm Word 3, bit  3 */
+	uint16_t targetFunc:1;	/* FC Parm Word 3, bit  4 */
+	uint16_t initiatorFunc:1;	/* FC Parm Word 3, bit  5 */
+	uint16_t dataOverLay:1;	/* FC Parm Word 3, bit  6 */
+	uint16_t ConfmComplAllowed:1;	/* FC Parm Word 3, bit  7 */
+#endif
+} PRLI;
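+
+/*
+ * Editorial sketch (not part of the original source): the mirrored
+ * bitfield layouts above keep each flag position-correct on either
+ * host byte order, so a PRLI ACC page is read directly, e.g.:
+ *
+ *	PRLI *npr = acc_page;		hypothetical payload pointer
+ *	if (npr->prliType == PRLI_FCP_TYPE &&
+ *	    npr->acceptRspCode == PRLI_REQ_EXECUTED &&
+ *	    npr->targetFunc)
+ *		...the remote port offers FCP target service...
+ */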
+
+/*
+ *  FCP Logout (PRLO Request / ACC) Payload Definition
+ */
+
+typedef struct _PRLO {		/* Structure is in Big Endian format */
+	uint8_t prloType;	/* FC Parm Word 0, bit 24:31 */
+
+#define PRLO_FCP_TYPE  0x08
+	uint8_t word0Reserved1;	/* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t word0Reserved2:2;	/* FC Parm Word 0, bit 12:13 */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+	uint8_t word0Reserved2:2;	/* FC Parm Word 0, bit 12:13 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+#endif
+
+#define PRLO_REQ_EXECUTED     0x1	/* acceptRspCode */
+#define PRLO_NO_SUCH_IMAGE    0x4
+#define PRLO_INVALID_PAGE_CNT 0x7
+
+	uint8_t word0Reserved3;	/* FC Parm Word 0, bit 0:7 */
+
+	uint32_t origProcAssoc;	/* FC Parm Word 1, bit 0:31 */
+
+	uint32_t respProcAssoc;	/* FC Parm Word 2, bit 0:31 */
+
+	uint32_t word3Reserved1;	/* FC Parm Word 3, bit 0:31 */
+} PRLO;
+
+typedef struct _ADISC {		/* Structure is in Big Endian format */
+	uint32_t hardAL_PA;
+	struct lpfc_name portName;
+	struct lpfc_name nodeName;
+	uint32_t DID;
+} ADISC;
+
+typedef struct _FARP {		/* Structure is in Big Endian format */
+	uint32_t Mflags:8;
+	uint32_t Odid:24;
+#define FARP_NO_ACTION          0	/* FARP information enclosed, no
+					   action */
+#define FARP_MATCH_PORT         0x1	/* Match on Responder Port Name */
+#define FARP_MATCH_NODE         0x2	/* Match on Responder Node Name */
+#define FARP_MATCH_IP           0x4	/* Match on IP address, not supported */
+#define FARP_MATCH_IPV4         0x5	/* Match on IPV4 address, not
+					   supported */
+#define FARP_MATCH_IPV6         0x6	/* Match on IPV6 address, not
+					   supported */
+	uint32_t Rflags:8;
+	uint32_t Rdid:24;
+#define FARP_REQUEST_PLOGI      0x1	/* Request for PLOGI */
+#define FARP_REQUEST_FARPR      0x2	/* Request for FARP Response */
+	struct lpfc_name OportName;
+	struct lpfc_name OnodeName;
+	struct lpfc_name RportName;
+	struct lpfc_name RnodeName;
+	uint8_t Oipaddr[16];
+	uint8_t Ripaddr[16];
+} FARP;
+
+typedef struct _FAN {		/* Structure is in Big Endian format */
+	uint32_t Fdid;
+	struct lpfc_name FportName;
+	struct lpfc_name FnodeName;
+} FAN;
+
+typedef struct _SCR {		/* Structure is in Big Endian format */
+	uint8_t resvd1;
+	uint8_t resvd2;
+	uint8_t resvd3;
+	uint8_t Function;
+#define  SCR_FUNC_FABRIC     0x01
+#define  SCR_FUNC_NPORT      0x02
+#define  SCR_FUNC_FULL       0x03
+#define  SCR_CLEAR           0xff
+} SCR;
+
+typedef struct _RNID_TOP_DISC {
+	struct lpfc_name portName;
+	uint8_t resvd[8];
+	uint32_t unitType;
+#define RNID_HBA            0x7
+#define RNID_HOST           0xa
+#define RNID_DRIVER         0xd
+	uint32_t physPort;
+	uint32_t attachedNodes;
+	uint16_t ipVersion;
+#define RNID_IPV4           0x1
+#define RNID_IPV6           0x2
+	uint16_t UDPport;
+	uint8_t ipAddr[16];
+	uint16_t resvd1;
+	uint16_t flags;
+#define RNID_TD_SUPPORT     0x1
+#define RNID_LP_VALID       0x2
+} RNID_TOP_DISC;
+
+typedef struct _RNID {		/* Structure is in Big Endian format */
+	uint8_t Format;
+#define RNID_TOPOLOGY_DISC  0xdf
+	uint8_t CommonLen;
+	uint8_t resvd1;
+	uint8_t SpecificLen;
+	struct lpfc_name portName;
+	struct lpfc_name nodeName;
+	union {
+		RNID_TOP_DISC topologyDisc;	/* topology disc (0xdf) */
+	} un;
+} RNID;
+
+typedef struct  _RPS {		/* Structure is in Big Endian format */
+	union {
+		uint32_t portNum;
+		struct lpfc_name portName;
+	} un;
+} RPS;
+
+typedef struct  _RPS_RSP {	/* Structure is in Big Endian format */
+	uint16_t rsvd1;
+	uint16_t portStatus;
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+} RPS_RSP;
+
+struct RLS {			/* Structure is in Big Endian format */
+	uint32_t rls;
+#define rls_rsvd_SHIFT		24
+#define rls_rsvd_MASK		0x000000ff
+#define rls_rsvd_WORD		rls
+#define rls_did_SHIFT		0
+#define rls_did_MASK		0x00ffffff
+#define rls_did_WORD		rls
+};
+
+struct  RLS_RSP {		/* Structure is in Big Endian format */
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+};
+
+struct RRQ {			/* Structure is in Big Endian format */
+	uint32_t rrq;
+#define rrq_rsvd_SHIFT		24
+#define rrq_rsvd_MASK		0x000000ff
+#define rrq_rsvd_WORD		rrq
+#define rrq_did_SHIFT		0
+#define rrq_did_MASK		0x00ffffff
+#define rrq_did_WORD		rrq
+	uint32_t rrq_exchg;
+#define rrq_oxid_SHIFT		16
+#define rrq_oxid_MASK		0xffff
+#define rrq_oxid_WORD		rrq_exchg
+#define rrq_rxid_SHIFT		0
+#define rrq_rxid_MASK		0xffff
+#define rrq_rxid_WORD		rrq_exchg
+};
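+
+/*
+ * Editorial sketch (not part of the original source): an RRQ payload
+ * is packed with the SHIFT/MASK pairs above and then swapped to wire
+ * order a whole word at a time, e.g.:
+ *
+ *	rrq.rrq = (s_id & rrq_did_MASK) << rrq_did_SHIFT;
+ *	rrq.rrq_exchg = ((oxid & rrq_oxid_MASK) << rrq_oxid_SHIFT) |
+ *			((rxid & rrq_rxid_MASK) << rrq_rxid_SHIFT);
+ *	rrq.rrq = cpu_to_be32(rrq.rrq);
+ *	rrq.rrq_exchg = cpu_to_be32(rrq.rrq_exchg);
+ *
+ * "s_id", "oxid" and "rxid" are hypothetical local variables.
+ */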
+
+#define LPFC_MAX_VFN_PER_PFN	255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN	0   /* Default VFs due to platform limitation*/
+
+struct RTV_RSP {		/* Structure is in Big Endian format */
+	uint32_t ratov;
+	uint32_t edtov;
+	uint32_t qtov;
+#define qtov_rsvd0_SHIFT	28
+#define qtov_rsvd0_MASK		0x0000000f
+#define qtov_rsvd0_WORD		qtov		/* reserved */
+#define qtov_edtovres_SHIFT	27
+#define qtov_edtovres_MASK	0x00000001
+#define qtov_edtovres_WORD	qtov		/* E_D_TOV Resolution */
+#define qtov_rsvd1_SHIFT	19
+#define qtov_rsvd1_MASK		0x0000003f
+#define qtov_rsvd1_WORD		qtov		/* reserved */
+#define qtov_rttov_SHIFT	18
+#define qtov_rttov_MASK		0x00000001
+#define qtov_rttov_WORD		qtov		/* R_T_TOV value */
+#define qtov_rsvd2_SHIFT	0
+#define qtov_rsvd2_MASK		0x0003ffff
+#define qtov_rsvd2_WORD		qtov		/* reserved */
+};
+
+
+typedef struct  _RPL {		/* Structure is in Big Endian format */
+	uint32_t maxsize;
+	uint32_t index;
+} RPL;
+
+typedef struct  _PORT_NUM_BLK {
+	uint32_t portNum;
+	uint32_t portID;
+	struct lpfc_name portName;
+} PORT_NUM_BLK;
+
+typedef struct  _RPL_RSP {	/* Structure is in Big Endian format */
+	uint32_t listLen;
+	uint32_t index;
+	PORT_NUM_BLK port_num_blk;
+} RPL_RSP;
+
+/* This is used for RSCN command */
+typedef struct _D_ID {		/* Structure is in Big Endian format */
+	union {
+		uint32_t word;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t resv;
+			uint8_t domain;
+			uint8_t area;
+			uint8_t id;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint8_t id;
+			uint8_t area;
+			uint8_t domain;
+			uint8_t resv;
+#endif
+		} b;
+	} un;
+} D_ID;
+
+#define RSCN_ADDRESS_FORMAT_PORT	0x0
+#define RSCN_ADDRESS_FORMAT_AREA	0x1
+#define RSCN_ADDRESS_FORMAT_DOMAIN	0x2
+#define RSCN_ADDRESS_FORMAT_FABRIC	0x3
+#define RSCN_ADDRESS_FORMAT_MASK	0x3
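+
+/*
+ * Editorial sketch (not part of the original source): each affected
+ * address entry in an RSCN payload carries its address-format
+ * qualifier in the high (resv) byte, so a page is decoded roughly as:
+ *
+ *	D_ID rscn_did;
+ *	rscn_did.un.word = be32_to_cpu(*lp);
+ *	switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
+ *	case RSCN_ADDRESS_FORMAT_PORT:		exact D_ID match
+ *	case RSCN_ADDRESS_FORMAT_AREA:		match domain and area
+ *	case RSCN_ADDRESS_FORMAT_DOMAIN:	match domain only
+ *	case RSCN_ADDRESS_FORMAT_FABRIC:	every port is affected
+ *	}
+ *
+ * "lp" is a hypothetical pointer into the RSCN page list.
+ */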
+
+/*
+ *  Structure to define all ELS Payload types
+ */
+
+typedef struct _ELS_PKT {	/* Structure is in Big Endian format */
+	uint8_t elsCode;	/* FC Word 0, bit 24:31 */
+	uint8_t elsByte1;
+	uint8_t elsByte2;
+	uint8_t elsByte3;
+	union {
+		struct ls_rjt lsRjt;	/* Payload for LS_RJT ELS response */
+		struct serv_parm logi;	/* Payload for PLOGI/FLOGI/PDISC/ACC */
+		LOGO logo;	/* Payload for PLOGO/FLOGO/ACC */
+		PRLI prli;	/* Payload for PRLI/ACC */
+		PRLO prlo;	/* Payload for PRLO/ACC */
+		ADISC adisc;	/* Payload for ADISC/ACC */
+		FARP farp;	/* Payload for FARP/ACC */
+		FAN fan;	/* Payload for FAN */
+		SCR scr;	/* Payload for SCR/ACC */
+		RNID rnid;	/* Payload for RNID */
+		uint8_t pad[128 - 4];	/* Pad out to payload of 128 bytes */
+	} un;
+} ELS_PKT;
+
+/*
+ * Link Cable Beacon (LCB) ELS Frame
+ */
+
+struct fc_lcb_request_frame {
+	uint32_t      lcb_command;      /* ELS command opcode (0x81)     */
+	uint8_t       lcb_sub_command; /* LCB Payload Word 1, bit 24:31 */
+#define LPFC_LCB_ON    0x1
+#define LPFC_LCB_OFF   0x2
+	uint8_t       reserved[3];
+
+	uint8_t       lcb_type; /* LCB Payload Word 2, bit 24:31 */
+#define LPFC_LCB_GREEN 0x1
+#define LPFC_LCB_AMBER 0x2
+	uint8_t       lcb_frequency;    /* LCB Payload Word 2, bit 16:23 */
+	uint16_t      lcb_duration;     /* LCB Payload Word 2, bit 15:0  */
+};
+
+/*
+ * Link Cable Beacon (LCB) ELS Response Frame
+ */
+struct fc_lcb_res_frame {
+	uint32_t      lcb_ls_acc;       /* Acceptance of LCB request (0x02) */
+	uint8_t       lcb_sub_command; /* LCB Payload Word 1, bit 24:31 */
+	uint8_t       reserved[3];
+	uint8_t       lcb_type; /* LCB Payload Word 2, bit 24:31 */
+	uint8_t       lcb_frequency;    /* LCB Payload Word 2, bit 16:23 */
+	uint16_t      lcb_duration;     /* LCB Payload Word 2, bit 15:0  */
+};
+
+/*
+ * Read Diagnostic Parameters (RDP) ELS frame.
+ */
+#define SFF_PG0_IDENT_SFP              0x3
+
+#define SFP_FLAG_PT_OPTICAL            0x0
+#define SFP_FLAG_PT_SWLASER            0x01
+#define SFP_FLAG_PT_LWLASER_LC1310     0x02
+#define SFP_FLAG_PT_LWLASER_LL1550     0x03
+#define SFP_FLAG_PT_MASK               0x0F
+#define SFP_FLAG_PT_SHIFT              0
+
+#define SFP_FLAG_IS_OPTICAL_PORT       0x01
+#define SFP_FLAG_IS_OPTICAL_MASK       0x010
+#define SFP_FLAG_IS_OPTICAL_SHIFT      4
+
+#define SFP_FLAG_IS_DESC_VALID         0x01
+#define SFP_FLAG_IS_DESC_VALID_MASK    0x020
+#define SFP_FLAG_IS_DESC_VALID_SHIFT   5
+
+#define SFP_FLAG_CT_UNKNOWN            0x0
+#define SFP_FLAG_CT_SFP_PLUS           0x01
+#define SFP_FLAG_CT_MASK               0x3C
+#define SFP_FLAG_CT_SHIFT              6
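+
+/*
+ * Editorial sketch (not part of the original source): these masks are
+ * applied before shifting (note e.g. SFP_FLAG_IS_OPTICAL_MASK covers
+ * bit 4 directly), so the flags word of struct fc_rdp_sfp_info is
+ * decoded roughly as:
+ *
+ *	port_type = (flags & SFP_FLAG_PT_MASK) >> SFP_FLAG_PT_SHIFT;
+ *	is_optical = (flags & SFP_FLAG_IS_OPTICAL_MASK) >>
+ *			SFP_FLAG_IS_OPTICAL_SHIFT;
+ */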
+
+struct fc_rdp_port_name_info {
+	uint8_t wwnn[8];
+	uint8_t wwpn[8];
+};
+
+
+/*
+ * Link Error Status Block Structure (FC-FS-3) for RDP
+ * This is similar to the RPS ELS response.
+ */
+struct fc_link_status {
+	uint32_t      link_failure_cnt;
+	uint32_t      loss_of_synch_cnt;
+	uint32_t      loss_of_signal_cnt;
+	uint32_t      primitive_seq_proto_err;
+	uint32_t      invalid_trans_word;
+	uint32_t      invalid_crc_cnt;
+};
+
+#define RDP_PORT_NAMES_DESC_TAG  0x00010003
+struct fc_rdp_port_name_desc {
+	uint32_t	tag;     /* 0001 0003h */
+	uint32_t	length;  /* set to size of payload struct */
+	struct fc_rdp_port_name_info  port_names;
+};
+
+
+struct fc_rdp_fec_info {
+	uint32_t CorrectedBlocks;
+	uint32_t UncorrectableBlocks;
+};
+
+#define RDP_FEC_DESC_TAG  0x00010005
+struct fc_fec_rdp_desc {
+	uint32_t tag;
+	uint32_t length;
+	struct fc_rdp_fec_info info;
+};
+
+struct fc_rdp_link_error_status_payload_info {
+	struct fc_link_status link_status; /* 24 bytes */
+	uint32_t  port_type;             /* bits 31-30 only */
+};
+
+#define RDP_LINK_ERROR_STATUS_DESC_TAG  0x00010002
+struct fc_rdp_link_error_status_desc {
+	uint32_t         tag;     /* 0001 0002h */
+	uint32_t         length;  /* set to size of payload struct */
+	struct fc_rdp_link_error_status_payload_info info;
+};
+
+#define VN_PT_PHY_UNKNOWN      0x00
+#define VN_PT_PHY_PF_PORT      0x01
+#define VN_PT_PHY_ETH_MAC      0x10
+#define VN_PT_PHY_SHIFT                30
+
+#define RDP_PS_1GB             0x8000
+#define RDP_PS_2GB             0x4000
+#define RDP_PS_4GB             0x2000
+#define RDP_PS_10GB            0x1000
+#define RDP_PS_8GB             0x0800
+#define RDP_PS_16GB            0x0400
+#define RDP_PS_32GB            0x0200
+
+#define RDP_CAP_USER_CONFIGURED 0x0002
+#define RDP_CAP_UNKNOWN         0x0001
+#define RDP_PS_UNKNOWN          0x0002
+#define RDP_PS_NOT_ESTABLISHED  0x0001
+
+struct fc_rdp_port_speed {
+	uint16_t   capabilities;
+	uint16_t   speed;
+};
+
+struct fc_rdp_port_speed_info {
+	struct fc_rdp_port_speed   port_speed;
+};
+
+#define RDP_PORT_SPEED_DESC_TAG  0x00010001
+struct fc_rdp_port_speed_desc {
+	uint32_t         tag;            /* 00010001h */
+	uint32_t         length;         /* set to size of payload struct */
+	struct fc_rdp_port_speed_info info;
+};
+
+#define RDP_NPORT_ID_SIZE      4
+#define RDP_N_PORT_DESC_TAG    0x00000003
+struct fc_rdp_nport_desc {
+	uint32_t         tag;          /* 0000 0003h, big endian */
+	uint32_t         length;       /* size of RDP_N_PORT_ID struct */
+	uint32_t         nport_id : 12;
+	uint32_t         reserved : 8;
+};
+
+
+struct fc_rdp_link_service_info {
+	uint32_t         els_req;    /* Request payload word 0 value.*/
+};
+
+#define RDP_LINK_SERVICE_DESC_TAG  0x00000001
+struct fc_rdp_link_service_desc {
+	uint32_t         tag;     /* Descriptor tag  1 */
+	uint32_t         length;  /* set to size of payload struct. */
+	struct fc_rdp_link_service_info  payload;
+				  /* must be ELS req Word 0(0x18) */
+};
+
+struct fc_rdp_sfp_info {
+	uint16_t	temperature;
+	uint16_t	vcc;
+	uint16_t	tx_bias;
+	uint16_t	tx_power;
+	uint16_t	rx_power;
+	uint16_t	flags;
+};
+
+#define RDP_SFP_DESC_TAG  0x00010000
+struct fc_rdp_sfp_desc {
+	uint32_t         tag;
+	uint32_t         length;  /* set to size of sfp_info struct */
+	struct fc_rdp_sfp_info sfp_info;
+};
+
+/* Buffer Credit Descriptor */
+struct fc_rdp_bbc_info {
+	uint32_t              port_bbc; /* FC_Port buffer-to-buffer credit */
+	uint32_t              attached_port_bbc;
+	uint32_t              rtt;      /* Round trip time */
+};
+#define RDP_BBC_DESC_TAG  0x00010006
+struct fc_rdp_bbc_desc {
+	uint32_t              tag;
+	uint32_t              length;
+	struct fc_rdp_bbc_info  bbc_info;
+};
+
+/* Optical Element Type Transgression Flags */
+#define RDP_OET_LOW_WARNING  0x1
+#define RDP_OET_HIGH_WARNING 0x2
+#define RDP_OET_LOW_ALARM    0x4
+#define RDP_OET_HIGH_ALARM   0x8
+
+#define RDP_OED_TEMPERATURE  0x1
+#define RDP_OED_VOLTAGE      0x2
+#define RDP_OED_TXBIAS       0x3
+#define RDP_OED_TXPOWER      0x4
+#define RDP_OED_RXPOWER      0x5
+
+#define RDP_OED_TYPE_SHIFT   28
+/* Optical Element Data descriptor */
+struct fc_rdp_oed_info {
+	uint16_t            hi_alarm;
+	uint16_t            lo_alarm;
+	uint16_t            hi_warning;
+	uint16_t            lo_warning;
+	uint32_t            function_flags;
+};
+#define RDP_OED_DESC_TAG  0x00010007
+struct fc_rdp_oed_sfp_desc {
+	uint32_t             tag;
+	uint32_t             length;
+	struct fc_rdp_oed_info oed_info;
+};
+
+/* Optical Product Data descriptor */
+struct fc_rdp_opd_sfp_info {
+	uint8_t            vendor_name[16];
+	uint8_t            model_number[16];
+	uint8_t            serial_number[16];
+	uint8_t            revision[4];
+	uint8_t            date[8];
+};
+
+#define RDP_OPD_DESC_TAG  0x00010008
+struct fc_rdp_opd_sfp_desc {
+	uint32_t             tag;
+	uint32_t             length;
+	struct fc_rdp_opd_sfp_info opd_info;
+};
+
+struct fc_rdp_req_frame {
+	uint32_t         rdp_command;           /* ELS command opcode (0x18)*/
+	uint32_t         rdp_des_length;        /* RDP Payload Word 1 */
+	struct fc_rdp_nport_desc nport_id_desc; /* RDP Payload Word 2 - 4 */
+};
+
+
+struct fc_rdp_res_frame {
+	uint32_t    reply_sequence;		/* FC word0 LS_ACC or LS_RJT */
+	uint32_t   length;			/* FC Word 1      */
+	struct fc_rdp_link_service_desc link_service_desc;    /* Word 2 -4   */
+	struct fc_rdp_sfp_desc sfp_desc;                      /* Word 5 -9   */
+	struct fc_rdp_port_speed_desc portspeed_desc;         /* Word 10 -12 */
+	struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13 -21 */
+	struct fc_rdp_port_name_desc diag_port_names_desc;    /* Word 22 -27 */
+	struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28 -33 */
+	struct fc_fec_rdp_desc fec_desc;                      /* FC word 34-37*/
+	struct fc_rdp_bbc_desc bbc_desc;                      /* FC Word 38-42*/
+	struct fc_rdp_oed_sfp_desc oed_temp_desc;             /* FC Word 43-47*/
+	struct fc_rdp_oed_sfp_desc oed_voltage_desc;          /* FC word 48-52*/
+	struct fc_rdp_oed_sfp_desc oed_txbias_desc;           /* FC word 53-57*/
+	struct fc_rdp_oed_sfp_desc oed_txpower_desc;          /* FC word 58-62*/
+	struct fc_rdp_oed_sfp_desc oed_rxpower_desc;          /* FC word 63-67*/
+	struct fc_rdp_opd_sfp_desc opd_desc;                  /* FC word 68-84*/
+};
+
+
+/******** FDMI ********/
+
+/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
+#define  SLI_CT_FDMI_Subtypes     0x10	/* Management Service Subtype */
+
+/* Definitions for HBA / Port attribute entries */
+
+/* Attribute Entry */
+struct lpfc_fdmi_attr_entry {
+	union {
+		uint32_t AttrInt;
+		uint8_t  AttrTypes[32];
+		uint8_t  AttrString[256];
+		struct lpfc_name AttrWWN;
+	} un;
+};
+
+struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+	/* Structure is in Big Endian format */
+	uint32_t AttrType:16;
+	uint32_t AttrLen:16;
+	/* Marks start of Value (ATTRIBUTE_ENTRY) */
+	struct lpfc_fdmi_attr_entry AttrValue;
+} __packed;
+
+/*
+ * HBA Attribute Block
+ */
+struct lpfc_fdmi_attr_block {
+	uint32_t EntryCnt;		/* Number of HBA attribute entries */
+	struct lpfc_fdmi_attr_entry Entry;	/* Variable-length array */
+};
+
+/*
+ * Port Entry
+ */
+struct lpfc_fdmi_port_entry {
+	struct lpfc_name PortName;
+};
+
+/*
+ * HBA Identifier
+ */
+struct lpfc_fdmi_hba_ident {
+	struct lpfc_name PortName;
+};
+
+/*
+ * Registered Port List Format
+ */
+struct lpfc_fdmi_reg_port_list {
+	uint32_t EntryCnt;
+	struct lpfc_fdmi_port_entry pe;
+} __packed;
+
+/*
+ * Register HBA(RHBA)
+ */
+struct lpfc_fdmi_reg_hba {
+	struct lpfc_fdmi_hba_ident hi;
+	struct lpfc_fdmi_reg_port_list rpl;
+};
+
+/*
+ * Register HBA Attributes (RHAT)
+ */
+struct lpfc_fdmi_reg_hbaattr {
+	struct lpfc_name HBA_PortName;
+	struct lpfc_fdmi_attr_block ab;
+};
+
+/*
+ * Register Port Attributes (RPA)
+ */
+struct lpfc_fdmi_reg_portattr {
+	struct lpfc_name PortName;
+	struct lpfc_fdmi_attr_block ab;
+};
+
+/*
+ * HBA Management Operations Command Codes
+ */
+#define  SLI_MGMT_GRHL     0x100	/* Get registered HBA list */
+#define  SLI_MGMT_GHAT     0x101	/* Get HBA attributes */
+#define  SLI_MGMT_GRPL     0x102	/* Get registered Port list */
+#define  SLI_MGMT_GPAT     0x110	/* Get Port attributes */
+#define  SLI_MGMT_GPAS     0x120	/* Get Port Statistics */
+#define  SLI_MGMT_RHBA     0x200	/* Register HBA */
+#define  SLI_MGMT_RHAT     0x201	/* Register HBA attributes */
+#define  SLI_MGMT_RPRT     0x210	/* Register Port */
+#define  SLI_MGMT_RPA      0x211	/* Register Port attributes */
+#define  SLI_MGMT_DHBA     0x300	/* De-register HBA */
+#define  SLI_MGMT_DHAT     0x301	/* De-register HBA attributes */
+#define  SLI_MGMT_DPRT     0x310	/* De-register Port */
+#define  SLI_MGMT_DPA      0x311	/* De-register Port attributes */
+
+#define LPFC_FDMI_MAX_RETRY     3  /* Max retries for a FDMI command */
+
+/*
+ * HBA Attribute Types
+ */
+#define  RHBA_NODENAME           0x1 /* 8 byte WWNN */
+#define  RHBA_MANUFACTURER       0x2 /* 4 to 64 byte ASCII string */
+#define  RHBA_SERIAL_NUMBER      0x3 /* 4 to 64 byte ASCII string */
+#define  RHBA_MODEL              0x4 /* 4 to 256 byte ASCII string */
+#define  RHBA_MODEL_DESCRIPTION  0x5 /* 4 to 256 byte ASCII string */
+#define  RHBA_HARDWARE_VERSION   0x6 /* 4 to 256 byte ASCII string */
+#define  RHBA_DRIVER_VERSION     0x7 /* 4 to 256 byte ASCII string */
+#define  RHBA_OPTION_ROM_VERSION 0x8 /* 4 to 256 byte ASCII string */
+#define  RHBA_FIRMWARE_VERSION   0x9 /* 4 to 256 byte ASCII string */
+#define  RHBA_OS_NAME_VERSION	 0xa /* 4 to 256 byte ASCII string */
+#define  RHBA_MAX_CT_PAYLOAD_LEN 0xb /* 32-bit unsigned int */
+#define  RHBA_SYM_NODENAME       0xc /* 4 to 256 byte ASCII string */
+#define  RHBA_VENDOR_INFO        0xd  /* 32-bit unsigned int */
+#define  RHBA_NUM_PORTS          0xe  /* 32-bit unsigned int */
+#define  RHBA_FABRIC_WWNN        0xf  /* 8 byte WWNN */
+#define  RHBA_BIOS_VERSION       0x10 /* 4 to 256 byte ASCII string */
+#define  RHBA_BIOS_STATE         0x11 /* 32-bit unsigned int */
+#define  RHBA_VENDOR_ID          0xe0 /* 8 byte ASCII string */
+
+/* Bit mask for all individual HBA attributes */
+#define LPFC_FDMI_HBA_ATTR_wwnn			0x00000001
+#define LPFC_FDMI_HBA_ATTR_manufacturer		0x00000002
+#define LPFC_FDMI_HBA_ATTR_sn			0x00000004
+#define LPFC_FDMI_HBA_ATTR_model		0x00000008
+#define LPFC_FDMI_HBA_ATTR_description		0x00000010
+#define LPFC_FDMI_HBA_ATTR_hdw_ver		0x00000020
+#define LPFC_FDMI_HBA_ATTR_drvr_ver		0x00000040
+#define LPFC_FDMI_HBA_ATTR_rom_ver		0x00000080
+#define LPFC_FDMI_HBA_ATTR_fmw_ver		0x00000100
+#define LPFC_FDMI_HBA_ATTR_os_ver		0x00000200
+#define LPFC_FDMI_HBA_ATTR_ct_len		0x00000400
+#define LPFC_FDMI_HBA_ATTR_symbolic_name	0x00000800
+#define LPFC_FDMI_HBA_ATTR_vendor_info		0x00001000 /* Not used */
+#define LPFC_FDMI_HBA_ATTR_num_ports		0x00002000
+#define LPFC_FDMI_HBA_ATTR_fabric_wwnn		0x00004000
+#define LPFC_FDMI_HBA_ATTR_bios_ver		0x00008000
+#define LPFC_FDMI_HBA_ATTR_bios_state		0x00010000 /* Not used */
+#define LPFC_FDMI_HBA_ATTR_vendor_id		0x00020000
+
+/* Bit mask for FDMI-1 defined HBA attributes */
+#define LPFC_FDMI1_HBA_ATTR			0x000007ff
+
+/* Bit mask for FDMI-2 defined HBA attributes */
+/* Skip vendor_info and bios_state */
+#define LPFC_FDMI2_HBA_ATTR			0x0002efff
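+
+/*
+ * Editorial worked example (not part of the original source): the
+ * FDMI-2 mask is all eighteen attribute bits (0x0003ffff) minus the
+ * two skipped ones:
+ *
+ *	0x0003ffff - LPFC_FDMI_HBA_ATTR_vendor_info (0x00001000)
+ *		   - LPFC_FDMI_HBA_ATTR_bios_state  (0x00010000)
+ *		   = 0x0002efff
+ */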
+
+/*
+ * Port Attribute Types
+ */
+#define  RPRT_SUPPORTED_FC4_TYPES     0x1 /* 32 byte binary array */
+#define  RPRT_SUPPORTED_SPEED         0x2 /* 32-bit unsigned int */
+#define  RPRT_PORT_SPEED              0x3 /* 32-bit unsigned int */
+#define  RPRT_MAX_FRAME_SIZE          0x4 /* 32-bit unsigned int */
+#define  RPRT_OS_DEVICE_NAME          0x5 /* 4 to 256 byte ASCII string */
+#define  RPRT_HOST_NAME               0x6 /* 4 to 256 byte ASCII string */
+#define  RPRT_NODENAME                0x7 /* 8 byte WWNN */
+#define  RPRT_PORTNAME                0x8 /* 8 byte WWPN */
+#define  RPRT_SYM_PORTNAME            0x9 /* 4 to 256 byte ASCII string */
+#define  RPRT_PORT_TYPE               0xa /* 32-bit unsigned int */
+#define  RPRT_SUPPORTED_CLASS         0xb /* 32-bit unsigned int */
+#define  RPRT_FABRICNAME              0xc /* 8 byte Fabric WWPN */
+#define  RPRT_ACTIVE_FC4_TYPES        0xd /* 32 byte binary array */
+#define  RPRT_PORT_STATE              0x101 /* 32-bit unsigned int */
+#define  RPRT_DISC_PORT               0x102 /* 32-bit unsigned int */
+#define  RPRT_PORT_ID                 0x103 /* 32-bit unsigned int */
+#define  RPRT_SMART_SERVICE           0xf100 /* 4 to 256 byte ASCII string */
+#define  RPRT_SMART_GUID              0xf101 /* 8 byte WWNN + 8 byte WWPN */
+#define  RPRT_SMART_VERSION           0xf102 /* 4 to 256 byte ASCII string */
+#define  RPRT_SMART_MODEL             0xf103 /* 4 to 256 byte ASCII string */
+#define  RPRT_SMART_PORT_INFO         0xf104 /* 32-bit unsigned int */
+#define  RPRT_SMART_QOS               0xf105 /* 32-bit unsigned int */
+#define  RPRT_SMART_SECURITY          0xf106 /* 32-bit unsigned int */
+
+/* Bit mask for all individual PORT attributes */
+#define LPFC_FDMI_PORT_ATTR_fc4type		0x00000001
+#define LPFC_FDMI_PORT_ATTR_support_speed	0x00000002
+#define LPFC_FDMI_PORT_ATTR_speed		0x00000004
+#define LPFC_FDMI_PORT_ATTR_max_frame		0x00000008
+#define LPFC_FDMI_PORT_ATTR_os_devname		0x00000010
+#define LPFC_FDMI_PORT_ATTR_host_name		0x00000020
+#define LPFC_FDMI_PORT_ATTR_wwnn		0x00000040
+#define LPFC_FDMI_PORT_ATTR_wwpn		0x00000080
+#define LPFC_FDMI_PORT_ATTR_symbolic_name	0x00000100
+#define LPFC_FDMI_PORT_ATTR_port_type		0x00000200
+#define LPFC_FDMI_PORT_ATTR_class		0x00000400
+#define LPFC_FDMI_PORT_ATTR_fabric_wwpn		0x00000800
+#define LPFC_FDMI_PORT_ATTR_port_state		0x00001000
+#define LPFC_FDMI_PORT_ATTR_active_fc4type	0x00002000
+#define LPFC_FDMI_PORT_ATTR_num_disc		0x00004000
+#define LPFC_FDMI_PORT_ATTR_nportid		0x00008000
+#define LPFC_FDMI_SMART_ATTR_service		0x00010000 /* Vendor specific */
+#define LPFC_FDMI_SMART_ATTR_guid		0x00020000 /* Vendor specific */
+#define LPFC_FDMI_SMART_ATTR_version		0x00040000 /* Vendor specific */
+#define LPFC_FDMI_SMART_ATTR_model		0x00080000 /* Vendor specific */
+#define LPFC_FDMI_SMART_ATTR_port_info		0x00100000 /* Vendor specific */
+#define LPFC_FDMI_SMART_ATTR_qos		0x00200000 /* Vendor specific */
+#define LPFC_FDMI_SMART_ATTR_security		0x00400000 /* Vendor specific */
+
+/* Bit mask for FDMI-1 defined PORT attributes */
+#define LPFC_FDMI1_PORT_ATTR			0x0000003f
+
+/* Bit mask for FDMI-2 defined PORT attributes */
+#define LPFC_FDMI2_PORT_ATTR			0x0000ffff
+
+/* Bit mask for Smart SAN defined PORT attributes */
+#define LPFC_FDMI2_SMART_ATTR			0x007fffff
+
+/* Defines for PORT port state attribute */
+#define LPFC_FDMI_PORTSTATE_UNKNOWN	1
+#define LPFC_FDMI_PORTSTATE_ONLINE	2
+
+/* Defines for PORT port type attribute */
+#define LPFC_FDMI_PORTTYPE_UNKNOWN	0
+#define LPFC_FDMI_PORTTYPE_NPORT	1
+#define LPFC_FDMI_PORTTYPE_NLPORT	2
+
+/*
+ *  Begin HBA configuration parameters.
+ *  The PCI configuration register BAR assignments are:
+ *  BAR0, offset 0x10 - SLIM base memory address
+ *  BAR1, offset 0x14 - SLIM base memory high address
+ *  BAR2, offset 0x18 - REGISTER base memory address
+ *  BAR3, offset 0x1c - REGISTER base memory high address
+ *  BAR4, offset 0x20 - BIU I/O registers
+ *  BAR5, offset 0x24 - REGISTER base io high address
+ */
+
+/* Number of rings currently used and available. */
+#define MAX_SLI3_CONFIGURED_RINGS     3
+#define MAX_SLI3_RINGS                4
+
+/* IOCB / Mailbox is owned by FireFly */
+#define OWN_CHIP        1
+
+/* IOCB / Mailbox is owned by Host */
+#define OWN_HOST        0
+
+/* Number of 4-byte words in an IOCB. */
+#define IOCB_WORD_SZ    8
+
+/* network headers for Dfctl field */
+#define FC_NET_HDR      0x20
+
+/* Start FireFly Register definitions */
+#define PCI_VENDOR_ID_EMULEX        0x10df
+#define PCI_DEVICE_ID_FIREFLY       0x1ae5
+#define PCI_DEVICE_ID_PROTEUS_VF    0xe100
+#define PCI_DEVICE_ID_BALIUS        0xe131
+#define PCI_DEVICE_ID_PROTEUS_PF    0xe180
+#define PCI_DEVICE_ID_LANCER_FC     0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF  0xe208
+#define PCI_DEVICE_ID_LANCER_FCOE   0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
+#define PCI_DEVICE_ID_LANCER_G6_FC  0xe300
+#define PCI_DEVICE_ID_SAT_SMB       0xf011
+#define PCI_DEVICE_ID_SAT_MID       0xf015
+#define PCI_DEVICE_ID_RFLY          0xf095
+#define PCI_DEVICE_ID_PFLY          0xf098
+#define PCI_DEVICE_ID_LP101         0xf0a1
+#define PCI_DEVICE_ID_TFLY          0xf0a5
+#define PCI_DEVICE_ID_BSMB          0xf0d1
+#define PCI_DEVICE_ID_BMID          0xf0d5
+#define PCI_DEVICE_ID_ZSMB          0xf0e1
+#define PCI_DEVICE_ID_ZMID          0xf0e5
+#define PCI_DEVICE_ID_NEPTUNE       0xf0f5
+#define PCI_DEVICE_ID_NEPTUNE_SCSP  0xf0f6
+#define PCI_DEVICE_ID_NEPTUNE_DCSP  0xf0f7
+#define PCI_DEVICE_ID_SAT           0xf100
+#define PCI_DEVICE_ID_SAT_SCSP      0xf111
+#define PCI_DEVICE_ID_SAT_DCSP      0xf112
+#define PCI_DEVICE_ID_FALCON        0xf180
+#define PCI_DEVICE_ID_SUPERFLY      0xf700
+#define PCI_DEVICE_ID_DRAGONFLY     0xf800
+#define PCI_DEVICE_ID_CENTAUR       0xf900
+#define PCI_DEVICE_ID_PEGASUS       0xf980
+#define PCI_DEVICE_ID_THOR          0xfa00
+#define PCI_DEVICE_ID_VIPER         0xfb00
+#define PCI_DEVICE_ID_LP10000S      0xfc00
+#define PCI_DEVICE_ID_LP11000S      0xfc10
+#define PCI_DEVICE_ID_LPE11000S     0xfc20
+#define PCI_DEVICE_ID_SAT_S         0xfc40
+#define PCI_DEVICE_ID_PROTEUS_S     0xfc50
+#define PCI_DEVICE_ID_HELIOS        0xfd00
+#define PCI_DEVICE_ID_HELIOS_SCSP   0xfd11
+#define PCI_DEVICE_ID_HELIOS_DCSP   0xfd12
+#define PCI_DEVICE_ID_ZEPHYR        0xfe00
+#define PCI_DEVICE_ID_HORNET        0xfe05
+#define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
+#define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE  0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK    0x0704
+#define PCI_DEVICE_ID_TOMCAT        0x0714
+#define PCI_DEVICE_ID_SKYHAWK       0x0724
+#define PCI_DEVICE_ID_SKYHAWK_VF    0x072c
+
+#define JEDEC_ID_ADDRESS            0x0080001c
+#define FIREFLY_JEDEC_ID            0x1ACC
+#define SUPERFLY_JEDEC_ID           0x0020
+#define DRAGONFLY_JEDEC_ID          0x0021
+#define DRAGONFLY_V2_JEDEC_ID       0x0025
+#define CENTAUR_2G_JEDEC_ID         0x0026
+#define CENTAUR_1G_JEDEC_ID         0x0028
+#define PEGASUS_ORION_JEDEC_ID      0x0036
+#define PEGASUS_JEDEC_ID            0x0038
+#define THOR_JEDEC_ID               0x0012
+#define HELIOS_JEDEC_ID             0x0364
+#define ZEPHYR_JEDEC_ID             0x0577
+#define VIPER_JEDEC_ID              0x4838
+#define SATURN_JEDEC_ID             0x1004
+#define HORNET_JDEC_ID              0x2057706D
+
+#define JEDEC_ID_MASK               0x0FFFF000
+#define JEDEC_ID_SHIFT              12
+#define FC_JEDEC_ID(id)             ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
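+
+/*
+ * Editorial worked example (not part of the original source): the
+ * JEDEC part ID occupies bits 12:27 of the register at
+ * JEDEC_ID_ADDRESS, so for a hypothetical raw readout of 0x00364abc:
+ *
+ *	FC_JEDEC_ID(0x00364abc)
+ *		= (0x00364abc & 0x0FFFF000) >> 12
+ *		= 0x00364000 >> 12
+ *		= 0x0364	(HELIOS_JEDEC_ID)
+ */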
+
+typedef struct {		/* FireFly BIU registers */
+	uint32_t hostAtt;	/* See definitions for Host Attention
+				   register */
+	uint32_t chipAtt;	/* See definitions for Chip Attention
+				   register */
+	uint32_t hostStatus;	/* See definitions for Host Status register */
+	uint32_t hostControl;	/* See definitions for Host Control register */
+	uint32_t buiConfig;	/* See definitions for BIU configuration
+				   register */
+} FF_REGS;
+
+/* IO Register size in bytes */
+#define FF_REG_AREA_SIZE       256
+
+/* Host Attention Register */
+
+#define HA_REG_OFFSET  0	/* Byte offset from register base address */
+
+#define HA_R0RE_REQ    0x00000001	/* Bit  0 */
+#define HA_R0CE_RSP    0x00000002	/* Bit  1 */
+#define HA_R0ATT       0x00000008	/* Bit  3 */
+#define HA_R1RE_REQ    0x00000010	/* Bit  4 */
+#define HA_R1CE_RSP    0x00000020	/* Bit  5 */
+#define HA_R1ATT       0x00000080	/* Bit  7 */
+#define HA_R2RE_REQ    0x00000100	/* Bit  8 */
+#define HA_R2CE_RSP    0x00000200	/* Bit  9 */
+#define HA_R2ATT       0x00000800	/* Bit 11 */
+#define HA_R3RE_REQ    0x00001000	/* Bit 12 */
+#define HA_R3CE_RSP    0x00002000	/* Bit 13 */
+#define HA_R3ATT       0x00008000	/* Bit 15 */
+#define HA_LATT        0x20000000	/* Bit 29 */
+#define HA_MBATT       0x40000000	/* Bit 30 */
+#define HA_ERATT       0x80000000	/* Bit 31 */
+
+#define HA_RXRE_REQ    0x00000001	/* Bit  0 */
+#define HA_RXCE_RSP    0x00000002	/* Bit  1 */
+#define HA_RXATT       0x00000008	/* Bit  3 */
+#define HA_RXMASK      0x0000000f
+
+#define HA_R0_CLR_MSK	(HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
+#define HA_R1_CLR_MSK	(HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
+#define HA_R2_CLR_MSK	(HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
+#define HA_R3_CLR_MSK	(HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
+
+#define HA_R0_POS	3
+#define HA_R1_POS	7
+#define HA_R2_POS	11
+#define HA_R3_POS	15
+#define HA_LE_POS	29
+#define HA_MB_POS	30
+#define HA_ER_POS	31
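+
+/*
+ * Editorial sketch (not part of the original source): the per-ring
+ * attention bits repeat every four bits (ring 0 in bits 0:3, ring 1
+ * in bits 4:7, and so on), so a ring's status can be isolated
+ * generically:
+ *
+ *	if ((ha_copy >> (ring_no * 4)) & HA_RXATT)
+ *		...the ring has an attention condition pending...
+ *
+ * For ring 2 this is equivalent to testing ha_copy & HA_R2ATT, since
+ * HA_RXATT << 8 == 0x800 == HA_R2ATT.  "ha_copy" and "ring_no" are
+ * hypothetical local variables.
+ */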
+/* Chip Attention Register */
+
+#define CA_REG_OFFSET  4	/* Byte offset from register base address */
+
+#define CA_R0CE_REQ    0x00000001	/* Bit  0 */
+#define CA_R0RE_RSP    0x00000002	/* Bit  1 */
+#define CA_R0ATT       0x00000008	/* Bit  3 */
+#define CA_R1CE_REQ    0x00000010	/* Bit  4 */
+#define CA_R1RE_RSP    0x00000020	/* Bit  5 */
+#define CA_R1ATT       0x00000080	/* Bit  7 */
+#define CA_R2CE_REQ    0x00000100	/* Bit  8 */
+#define CA_R2RE_RSP    0x00000200	/* Bit  9 */
+#define CA_R2ATT       0x00000800	/* Bit 11 */
+#define CA_R3CE_REQ    0x00001000	/* Bit 12 */
+#define CA_R3RE_RSP    0x00002000	/* Bit 13 */
+#define CA_R3ATT       0x00008000	/* Bit 15 */
+#define CA_MBATT       0x40000000	/* Bit 30 */
+
+/* Host Status Register */
+
+#define HS_REG_OFFSET  8	/* Byte offset from register base address */
+
+#define HS_MBRDY       0x00400000	/* Bit 22 */
+#define HS_FFRDY       0x00800000	/* Bit 23 */
+#define HS_FFER8       0x01000000	/* Bit 24 */
+#define HS_FFER7       0x02000000	/* Bit 25 */
+#define HS_FFER6       0x04000000	/* Bit 26 */
+#define HS_FFER5       0x08000000	/* Bit 27 */
+#define HS_FFER4       0x10000000	/* Bit 28 */
+#define HS_FFER3       0x20000000	/* Bit 29 */
+#define HS_FFER2       0x40000000	/* Bit 30 */
+#define HS_FFER1       0x80000000	/* Bit 31 */
+#define HS_CRIT_TEMP   0x00000100	/* Bit 8  */
+#define HS_FFERM       0xFF000100	/* Mask for error bits 31:24 and 8 */
+#define UNPLUG_ERR     0x00000001	/* Indicate pci hot unplug */
+/* Host Control Register */
+
+#define HC_REG_OFFSET  12	/* Byte offset from register base address */
+
+#define HC_MBINT_ENA   0x00000001	/* Bit  0 */
+#define HC_R0INT_ENA   0x00000002	/* Bit  1 */
+#define HC_R1INT_ENA   0x00000004	/* Bit  2 */
+#define HC_R2INT_ENA   0x00000008	/* Bit  3 */
+#define HC_R3INT_ENA   0x00000010	/* Bit  4 */
+#define HC_INITHBI     0x02000000	/* Bit 25 */
+#define HC_INITMB      0x04000000	/* Bit 26 */
+#define HC_INITFF      0x08000000	/* Bit 27 */
+#define HC_LAINT_ENA   0x20000000	/* Bit 29 */
+#define HC_ERINT_ENA   0x80000000	/* Bit 31 */
+
+/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
+#define MSIX_DFLT_ID	0
+#define MSIX_RNG0_ID	0
+#define MSIX_RNG1_ID	1
+#define MSIX_RNG2_ID	2
+#define MSIX_RNG3_ID	3
+
+#define MSIX_LINK_ID	4
+#define MSIX_MBOX_ID	5
+
+#define MSIX_SPARE0_ID	6
+#define MSIX_SPARE1_ID	7
+
+/* Mailbox Commands */
+#define MBX_SHUTDOWN        0x00	/* terminate testing */
+#define MBX_LOAD_SM         0x01
+#define MBX_READ_NV         0x02
+#define MBX_WRITE_NV        0x03
+#define MBX_RUN_BIU_DIAG    0x04
+#define MBX_INIT_LINK       0x05
+#define MBX_DOWN_LINK       0x06
+#define MBX_CONFIG_LINK     0x07
+#define MBX_CONFIG_RING     0x09
+#define MBX_RESET_RING      0x0A
+#define MBX_READ_CONFIG     0x0B
+#define MBX_READ_RCONFIG    0x0C
+#define MBX_READ_SPARM      0x0D
+#define MBX_READ_STATUS     0x0E
+#define MBX_READ_RPI        0x0F
+#define MBX_READ_XRI        0x10
+#define MBX_READ_REV        0x11
+#define MBX_READ_LNK_STAT   0x12
+#define MBX_REG_LOGIN       0x13
+#define MBX_UNREG_LOGIN     0x14
+#define MBX_CLEAR_LA        0x16
+#define MBX_DUMP_MEMORY     0x17
+#define MBX_DUMP_CONTEXT    0x18
+#define MBX_RUN_DIAGS       0x19
+#define MBX_RESTART         0x1A
+#define MBX_UPDATE_CFG      0x1B
+#define MBX_DOWN_LOAD       0x1C
+#define MBX_DEL_LD_ENTRY    0x1D
+#define MBX_RUN_PROGRAM     0x1E
+#define MBX_SET_MASK        0x20
+#define MBX_SET_VARIABLE    0x21
+#define MBX_UNREG_D_ID      0x23
+#define MBX_KILL_BOARD      0x24
+#define MBX_CONFIG_FARP     0x25
+#define MBX_BEACON          0x2A
+#define MBX_CONFIG_MSI      0x30
+#define MBX_HEARTBEAT       0x31
+#define MBX_WRITE_VPARMS    0x32
+#define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_READ_EVENT_LOG_STATUS 0x37
+#define MBX_READ_EVENT_LOG  0x38
+#define MBX_WRITE_EVENT_LOG 0x39
+
+#define MBX_PORT_CAPABILITIES 0x3B
+#define MBX_PORT_IOV_CONTROL 0x3C
+
+#define MBX_CONFIG_HBQ	    0x7C
+#define MBX_LOAD_AREA       0x81
+#define MBX_RUN_BIU_DIAG64  0x84
+#define MBX_CONFIG_PORT     0x88
+#define MBX_READ_SPARM64    0x8D
+#define MBX_READ_RPI64      0x8F
+#define MBX_REG_LOGIN64     0x93
+#define MBX_READ_TOPOLOGY   0x95
+#define MBX_REG_VPI	    0x96
+#define MBX_UNREG_VPI	    0x97
+
+#define MBX_WRITE_WWN       0x98
+#define MBX_SET_DEBUG       0x99
+#define MBX_LOAD_EXP_ROM    0x9C
+#define MBX_SLI4_CONFIG	    0x9B
+#define MBX_SLI4_REQ_FTRS   0x9D
+#define MBX_MAX_CMDS        0x9E
+#define MBX_RESUME_RPI      0x9E
+#define MBX_SLI2_CMD_MASK   0x80
+#define MBX_REG_VFI         0x9F
+#define MBX_REG_FCFI        0xA0
+#define MBX_UNREG_VFI       0xA1
+#define MBX_UNREG_FCFI	    0xA2
+#define MBX_INIT_VFI        0xA3
+#define MBX_INIT_VPI        0xA4
+#define MBX_ACCESS_VDATA    0xA5
+#define MBX_REG_FCFI_MRQ    0xAF
+
+#define MBX_AUTH_PORT       0xF8
+#define MBX_SECURITY_MGMT   0xF9
+
+/* IOCB Commands */
+
+#define CMD_RCV_SEQUENCE_CX     0x01
+#define CMD_XMIT_SEQUENCE_CR    0x02
+#define CMD_XMIT_SEQUENCE_CX    0x03
+#define CMD_XMIT_BCAST_CN       0x04
+#define CMD_XMIT_BCAST_CX       0x05
+#define CMD_QUE_RING_BUF_CN     0x06
+#define CMD_QUE_XRI_BUF_CX      0x07
+#define CMD_IOCB_CONTINUE_CN    0x08
+#define CMD_RET_XRI_BUF_CX      0x09
+#define CMD_ELS_REQUEST_CR      0x0A
+#define CMD_ELS_REQUEST_CX      0x0B
+#define CMD_RCV_ELS_REQ_CX      0x0D
+#define CMD_ABORT_XRI_CN        0x0E
+#define CMD_ABORT_XRI_CX        0x0F
+#define CMD_CLOSE_XRI_CN        0x10
+#define CMD_CLOSE_XRI_CX        0x11
+#define CMD_CREATE_XRI_CR       0x12
+#define CMD_CREATE_XRI_CX       0x13
+#define CMD_GET_RPI_CN          0x14
+#define CMD_XMIT_ELS_RSP_CX     0x15
+#define CMD_GET_RPI_CR          0x16
+#define CMD_XRI_ABORTED_CX      0x17
+#define CMD_FCP_IWRITE_CR       0x18
+#define CMD_FCP_IWRITE_CX       0x19
+#define CMD_FCP_IREAD_CR        0x1A
+#define CMD_FCP_IREAD_CX        0x1B
+#define CMD_FCP_ICMND_CR        0x1C
+#define CMD_FCP_ICMND_CX        0x1D
+#define CMD_FCP_TSEND_CX        0x1F
+#define CMD_FCP_TRECEIVE_CX     0x21
+#define CMD_FCP_TRSP_CX	        0x23
+#define CMD_FCP_AUTO_TRSP_CX    0x29
+
+#define CMD_ADAPTER_MSG         0x20
+#define CMD_ADAPTER_DUMP        0x22
+
+/*  SLI_2 IOCB Command Set */
+
+#define CMD_ASYNC_STATUS        0x7C
+#define CMD_RCV_SEQUENCE64_CX   0x81
+#define CMD_XMIT_SEQUENCE64_CR  0x82
+#define CMD_XMIT_SEQUENCE64_CX  0x83
+#define CMD_XMIT_BCAST64_CN     0x84
+#define CMD_XMIT_BCAST64_CX     0x85
+#define CMD_QUE_RING_BUF64_CN   0x86
+#define CMD_QUE_XRI_BUF64_CX    0x87
+#define CMD_IOCB_CONTINUE64_CN  0x88
+#define CMD_RET_XRI_BUF64_CX    0x89
+#define CMD_ELS_REQUEST64_CR    0x8A
+#define CMD_ELS_REQUEST64_CX    0x8B
+#define CMD_ABORT_MXRI64_CN     0x8C
+#define CMD_RCV_ELS_REQ64_CX    0x8D
+#define CMD_XMIT_ELS_RSP64_CX   0x95
+#define CMD_XMIT_BLS_RSP64_CX   0x97
+#define CMD_FCP_IWRITE64_CR     0x98
+#define CMD_FCP_IWRITE64_CX     0x99
+#define CMD_FCP_IREAD64_CR      0x9A
+#define CMD_FCP_IREAD64_CX      0x9B
+#define CMD_FCP_ICMND64_CR      0x9C
+#define CMD_FCP_ICMND64_CX      0x9D
+#define CMD_FCP_TSEND64_CX      0x9F
+#define CMD_FCP_TRECEIVE64_CX   0xA1
+#define CMD_FCP_TRSP64_CX       0xA3
+
+#define CMD_QUE_XRI64_CX	0xB3
+#define CMD_IOCB_RCV_SEQ64_CX	0xB5
+#define CMD_IOCB_RCV_ELS64_CX	0xB7
+#define CMD_IOCB_RET_XRI64_CX	0xB9
+#define CMD_IOCB_RCV_CONT64_CX	0xBB
+
+#define CMD_GEN_REQUEST64_CR    0xC2
+#define CMD_GEN_REQUEST64_CX    0xC3
+
+/* Unhandled SLI-3 Commands */
+#define CMD_IOCB_XMIT_MSEQ64_CR		0xB0
+#define CMD_IOCB_XMIT_MSEQ64_CX		0xB1
+#define CMD_IOCB_RCV_SEQ_LIST64_CX	0xC1
+#define CMD_IOCB_RCV_ELS_LIST64_CX	0xCD
+#define CMD_IOCB_CLOSE_EXTENDED_CN	0xB6
+#define CMD_IOCB_ABORT_EXTENDED_CN	0xBA
+#define CMD_IOCB_RET_HBQE64_CN		0xCA
+#define CMD_IOCB_FCP_IBIDIR64_CR	0xAC
+#define CMD_IOCB_FCP_IBIDIR64_CX	0xAD
+#define CMD_IOCB_FCP_ITASKMGT64_CX	0xAF
+#define CMD_IOCB_LOGENTRY_CN		0x94
+#define CMD_IOCB_LOGENTRY_ASYNC_CN	0x96
+
+/* Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR		0xF8
+#define DSSCMD_IWRITE64_CX		0xF9
+#define DSSCMD_IREAD64_CR		0xFA
+#define DSSCMD_IREAD64_CX		0xFB
+
+#define CMD_MAX_IOCB_CMD        0xFB
+#define CMD_IOCB_MASK           0xff
+
+#define MAX_MSG_DATA            28	/* max msg data in CMD_ADAPTER_MSG
+					   iocb */
+#define LPFC_MAX_ADPTMSG         32	/* max msg data */
+/*
+ *  Define Status
+ */
+#define MBX_SUCCESS                 0
+#define MBXERR_NUM_RINGS            1
+#define MBXERR_NUM_IOCBS            2
+#define MBXERR_IOCBS_EXCEEDED       3
+#define MBXERR_BAD_RING_NUMBER      4
+#define MBXERR_MASK_ENTRIES_RANGE   5
+#define MBXERR_MASKS_EXCEEDED       6
+#define MBXERR_BAD_PROFILE          7
+#define MBXERR_BAD_DEF_CLASS        8
+#define MBXERR_BAD_MAX_RESPONDER    9
+#define MBXERR_BAD_MAX_ORIGINATOR   10
+#define MBXERR_RPI_REGISTERED       11
+#define MBXERR_RPI_FULL             12
+#define MBXERR_NO_RESOURCES         13
+#define MBXERR_BAD_RCV_LENGTH       14
+#define MBXERR_DMA_ERROR            15
+#define MBXERR_ERROR                16
+#define MBXERR_LINK_DOWN            0x33
+#define MBXERR_SEC_NO_PERMISSION    0xF02
+#define MBX_NOT_FINISHED            255
+
+#define MBX_BUSY                   0xffffff /* Attempted cmd to busy Mailbox */
+#define MBX_TIMEOUT                0xfffffe /* time-out expired waiting for mbox command completion */
+
+#define TEMPERATURE_OFFSET 0xB0	/* Slim offset for critical temperature event */
+
+/*
+ * Failure return code
+ */
+#define FAILURE 1
+
+/*
+ *    Begin Structure Definitions for Mailbox Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t tval;
+	uint8_t tmask;
+	uint8_t rval;
+	uint8_t rmask;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t rmask;
+	uint8_t rval;
+	uint8_t tmask;
+	uint8_t tval;
+#endif
+} RR_REG;
+
+struct ulp_bde {
+	uint32_t bdeAddress;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t bdeReserved:4;
+	uint32_t bdeAddrHigh:4;
+	uint32_t bdeSize:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t bdeSize:24;
+	uint32_t bdeAddrHigh:4;
+	uint32_t bdeReserved:4;
+#endif
+};
+
+typedef struct ULP_BDL {	/* SLI-2 */
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t bdeFlags:8;	/* BDL Flags */
+	uint32_t bdeSize:24;	/* Size of BDL array in host memory (bytes) */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t bdeSize:24;	/* Size of BDL array in host memory (bytes) */
+	uint32_t bdeFlags:8;	/* BDL Flags */
+#endif
+
+	uint32_t addrLow;	/* Address 0:31 */
+	uint32_t addrHigh;	/* Address 32:63 */
+	uint32_t ulpIoTag32;	/* Can be used for 32 bit I/O Tag */
+} ULP_BDL;
+
+/*
+ * BlockGuard Definitions
+ */
+
+enum lpfc_protgrp_type {
+	LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors                  */
+	LPFC_PG_TYPE_NO_DIF,	  /* no DIF data pointed to by prot grp       */
+	LPFC_PG_TYPE_EMBD_DIF,	  /* DIF is embedded (inline) with data       */
+	LPFC_PG_TYPE_DIF_BUF	  /* DIF has its own scatter/gather list      */
+};
+
+/* PDE Descriptors */
+#define LPFC_PDE5_DESCRIPTOR		0x85
+#define LPFC_PDE6_DESCRIPTOR		0x86
+#define LPFC_PDE7_DESCRIPTOR		0x87
+
+/* BlockGuard Opcodes */
+#define BG_OP_IN_NODIF_OUT_CRC		0x0
+#define	BG_OP_IN_CRC_OUT_NODIF		0x1
+#define	BG_OP_IN_NODIF_OUT_CSUM		0x2
+#define	BG_OP_IN_CSUM_OUT_NODIF		0x3
+#define	BG_OP_IN_CRC_OUT_CRC		0x4
+#define	BG_OP_IN_CSUM_OUT_CSUM		0x5
+#define	BG_OP_IN_CRC_OUT_CSUM		0x6
+#define	BG_OP_IN_CSUM_OUT_CRC		0x7
+#define	BG_OP_RAW_MODE			0x8
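+
+/*
+ * Editorial note (not part of the original source): in the BG_OP_*
+ * names, "IN" describes the protection data on the host/memory side
+ * and "OUT" the wire side, independent of I/O direction.  For example
+ * BG_OP_IN_NODIF_OUT_CRC covers both a write that inserts a CRC guard
+ * toward the wire and a read that strips it before reaching memory.
+ */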
+
+struct lpfc_pde5 {
+	uint32_t word0;
+#define pde5_type_SHIFT		24
+#define pde5_type_MASK		0x000000ff
+#define pde5_type_WORD		word0
+#define pde5_rsvd0_SHIFT	0
+#define pde5_rsvd0_MASK		0x00ffffff
+#define pde5_rsvd0_WORD		word0
+	uint32_t reftag;	/* Reference Tag Value			*/
+	uint32_t reftagtr;	/* Reference Tag Translation Value 	*/
+};
+
+struct lpfc_pde6 {
+	uint32_t word0;
+#define pde6_type_SHIFT		24
+#define pde6_type_MASK		0x000000ff
+#define pde6_type_WORD		word0
+#define pde6_rsvd0_SHIFT	0
+#define pde6_rsvd0_MASK		0x00ffffff
+#define pde6_rsvd0_WORD		word0
+	uint32_t word1;
+#define pde6_rsvd1_SHIFT	26
+#define pde6_rsvd1_MASK		0x0000003f
+#define pde6_rsvd1_WORD		word1
+#define pde6_na_SHIFT		25
+#define pde6_na_MASK		0x00000001
+#define pde6_na_WORD		word1
+#define pde6_rsvd2_SHIFT	16
+#define pde6_rsvd2_MASK		0x000001FF
+#define pde6_rsvd2_WORD		word1
+#define pde6_apptagtr_SHIFT	0
+#define pde6_apptagtr_MASK	0x0000ffff
+#define pde6_apptagtr_WORD	word1
+	uint32_t word2;
+#define pde6_optx_SHIFT		28
+#define pde6_optx_MASK		0x0000000f
+#define pde6_optx_WORD		word2
+#define pde6_oprx_SHIFT		24
+#define pde6_oprx_MASK		0x0000000f
+#define pde6_oprx_WORD		word2
+#define pde6_nr_SHIFT		23
+#define pde6_nr_MASK		0x00000001
+#define pde6_nr_WORD		word2
+#define pde6_ce_SHIFT		22
+#define pde6_ce_MASK		0x00000001
+#define pde6_ce_WORD		word2
+#define pde6_re_SHIFT		21
+#define pde6_re_MASK		0x00000001
+#define pde6_re_WORD		word2
+#define pde6_ae_SHIFT		20
+#define pde6_ae_MASK		0x00000001
+#define pde6_ae_WORD		word2
+#define pde6_ai_SHIFT		19
+#define pde6_ai_MASK		0x00000001
+#define pde6_ai_WORD		word2
+#define pde6_bs_SHIFT		16
+#define pde6_bs_MASK		0x00000007
+#define pde6_bs_WORD		word2
+#define pde6_apptagval_SHIFT	0
+#define pde6_apptagval_MASK	0x0000ffff
+#define pde6_apptagval_WORD	word2
+};
+
+struct lpfc_pde7 {
+	uint32_t word0;
+#define pde7_type_SHIFT		24
+#define pde7_type_MASK		0x000000ff
+#define pde7_type_WORD		word0
+#define pde7_rsvd0_SHIFT	0
+#define pde7_rsvd0_MASK		0x00ffffff
+#define pde7_rsvd0_WORD		word0
+	uint32_t addrHigh;
+	uint32_t addrLow;
+};
+
+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd2:25;
+	uint32_t acknowledgment:1;
+	uint32_t version:1;
+	uint32_t erase_or_prog:1;
+	uint32_t update_flash:1;
+	uint32_t update_ram:1;
+	uint32_t method:1;
+	uint32_t load_cmplt:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t load_cmplt:1;
+	uint32_t method:1;
+	uint32_t update_ram:1;
+	uint32_t update_flash:1;
+	uint32_t erase_or_prog:1;
+	uint32_t version:1;
+	uint32_t acknowledgment:1;
+	uint32_t rsvd2:25;
+#endif
+
+	uint32_t dl_to_adr_low;
+	uint32_t dl_to_adr_high;
+	uint32_t dl_len;
+	union {
+		uint32_t dl_from_mbx_offset;
+		struct ulp_bde dl_from_bde;
+		struct ulp_bde64 dl_from_bde64;
+	} un;
+
+} LOAD_SM_VAR;
+
+/* Structure for MB Command READ_NVPARM (02) */
+
+typedef struct {
+	uint32_t rsvd1[3];	/* Read as all ones */
+	uint32_t rsvd2;		/* Read as all zeros */
+	uint32_t portname[2];	/* N_PORT name */
+	uint32_t nodename[2];	/* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t pref_DID:24;
+	uint32_t hardAL_PA:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t hardAL_PA:8;
+	uint32_t pref_DID:24;
+#endif
+
+	uint32_t rsvd3[21];	/* Read as all ones */
+} READ_NV_VAR;
+
+/* Structure for MB Command WRITE_NVPARMS (03) */
+
+typedef struct {
+	uint32_t rsvd1[3];	/* Must be all ones */
+	uint32_t rsvd2;		/* Must be all zeros */
+	uint32_t portname[2];	/* N_PORT name */
+	uint32_t nodename[2];	/* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t pref_DID:24;
+	uint32_t hardAL_PA:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t hardAL_PA:8;
+	uint32_t pref_DID:24;
+#endif
+
+	uint32_t rsvd3[21];	/* Must be all ones */
+} WRITE_NV_VAR;
+
+/* Structure for MB Command RUN_BIU_DIAG (04) */
+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
+
+typedef struct {
+	uint32_t rsvd1;
+	union {
+		struct {
+			struct ulp_bde xmit_bde;
+			struct ulp_bde rcv_bde;
+		} s1;
+		struct {
+			struct ulp_bde64 xmit_bde64;
+			struct ulp_bde64 rcv_bde64;
+		} s2;
+	} un;
+} BIU_DIAG_VAR;
+
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+struct READ_EVENT_LOG_VAR {
+	uint32_t word1;
+#define lpfc_event_log_SHIFT	29
+#define lpfc_event_log_MASK	0x00000001
+#define lpfc_event_log_WORD	word1
+#define USE_MAILBOX_RESPONSE	1
+	uint32_t offset;
+	struct ulp_bde64 rcv_bde64;
+};
+
+/* Structure for MB Command INIT_LINK (05) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1:24;
+	uint32_t lipsr_AL_PA:8;	/* AL_PA to issue Lip Selective Reset to */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t lipsr_AL_PA:8;	/* AL_PA to issue Lip Selective Reset to */
+	uint32_t rsvd1:24;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t fabric_AL_PA;	/* If using a Fabric Assigned AL_PA */
+	uint8_t rsvd2;
+	uint16_t link_flags;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t link_flags;
+	uint8_t rsvd2;
+	uint8_t fabric_AL_PA;	/* If using a Fabric Assigned AL_PA */
+#endif
+
+#define FLAGS_TOPOLOGY_MODE_LOOP_PT  0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
+#define FLAGS_TOPOLOGY_MODE_PT_PT    0x02 /* Attempt pt-pt only */
+#define FLAGS_TOPOLOGY_MODE_LOOP     0x04 /* Attempt loop only */
+#define FLAGS_TOPOLOGY_MODE_PT_LOOP  0x06 /* Attempt pt-pt then loop */
+#define	FLAGS_UNREG_LOGIN_ALL	     0x08 /* UNREG_LOGIN all on link down */
+#define FLAGS_LIRP_LILP              0x80 /* LIRP / LILP is disabled */
+
+#define FLAGS_TOPOLOGY_FAILOVER      0x0400	/* Bit 10 */
+#define FLAGS_LINK_SPEED             0x0800	/* Bit 11 */
+#define FLAGS_IMED_ABORT             0x04000	/* Bit 14 */
+
+	uint32_t link_speed;
+#define LINK_SPEED_AUTO 0x0     /* Auto selection */
+#define LINK_SPEED_1G   0x1     /* 1 Gigabaud */
+#define LINK_SPEED_2G   0x2     /* 2 Gigabaud */
+#define LINK_SPEED_4G   0x4     /* 4 Gigabaud */
+#define LINK_SPEED_8G   0x8     /* 8 Gigabaud */
+#define LINK_SPEED_10G  0x10    /* 10 Gigabaud */
+#define LINK_SPEED_16G  0x11    /* 16 Gigabaud */
+#define LINK_SPEED_32G  0x14    /* 32 Gigabaud */
+
+} INIT_LINK_VAR;
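+
+/*
+ * Editorial sketch (not part of the original source): FLAGS_LINK_SPEED
+ * gates whether the link_speed word is honored, so forcing a fixed
+ * speed looks roughly like:
+ *
+ *	init_link->link_flags |= FLAGS_LINK_SPEED;
+ *	init_link->link_speed = LINK_SPEED_8G;
+ *
+ * Leaving the flag clear requests auto-negotiation regardless of the
+ * link_speed value.  "init_link" is a hypothetical INIT_LINK_VAR
+ * pointer.
+ */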
+
+/* Structure for MB Command DOWN_LINK (06) */
+
+typedef struct {
+	uint32_t rsvd1;
+} DOWN_LINK_VAR;
+
+/* Structure for MB Command CONFIG_LINK (07) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cr:1;
+	uint32_t ci:1;
+	uint32_t cr_delay:6;
+	uint32_t cr_count:8;
+	uint32_t rsvd1:8;
+	uint32_t MaxBBC:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t MaxBBC:8;
+	uint32_t rsvd1:8;
+	uint32_t cr_count:8;
+	uint32_t cr_delay:6;
+	uint32_t ci:1;
+	uint32_t cr:1;
+#endif
+
+	uint32_t myId;
+	uint32_t rsvd2;
+	uint32_t edtov;
+	uint32_t arbtov;
+	uint32_t ratov;
+	uint32_t rttov;
+	uint32_t altov;
+	uint32_t crtov;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd4:19;
+	uint32_t cscn:1;
+	uint32_t bbscn:4;
+	uint32_t rsvd3:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rsvd3:8;
+	uint32_t bbscn:4;
+	uint32_t cscn:1;
+	uint32_t rsvd4:19;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rrq_enable:1;
+	uint32_t rrq_immed:1;
+	uint32_t rsvd5:29;
+	uint32_t ack0_enable:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ack0_enable:1;
+	uint32_t rsvd5:29;
+	uint32_t rrq_immed:1;
+	uint32_t rrq_enable:1;
+#endif
+} CONFIG_LINK;
+
+/* Structure for MB Command PART_SLIM (08)
+ * This command will be removed since SLI-1 is no longer supported.
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t offCiocb;
+	uint16_t numCiocb;
+	uint16_t offRiocb;
+	uint16_t numRiocb;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t numCiocb;
+	uint16_t offCiocb;
+	uint16_t numRiocb;
+	uint16_t offRiocb;
+#endif
+} RING_DEF;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t unused1:24;
+	uint32_t numRing:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t numRing:8;
+	uint32_t unused1:24;
+#endif
+
+	RING_DEF ringdef[4];
+	uint32_t hbainit;
+} PART_SLIM_VAR;
+
+/* Structure for MB Command CONFIG_RING (09) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t unused2:6;
+	uint32_t recvSeq:1;
+	uint32_t recvNotify:1;
+	uint32_t numMask:8;
+	uint32_t profile:8;
+	uint32_t unused1:4;
+	uint32_t ring:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ring:4;
+	uint32_t unused1:4;
+	uint32_t profile:8;
+	uint32_t numMask:8;
+	uint32_t recvNotify:1;
+	uint32_t recvSeq:1;
+	uint32_t unused2:6;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t maxRespXchg;
+	uint16_t maxOrigXchg;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t maxOrigXchg;
+	uint16_t maxRespXchg;
+#endif
+
+	RR_REG rrRegs[6];
+} CONFIG_RING_VAR;
+
+/* Structure for MB Command RESET_RING (10) */
+
+typedef struct {
+	uint32_t ring_no;
+} RESET_RING_VAR;
+
+/* Structure for MB Command READ_CONFIG (11) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cr:1;
+	uint32_t ci:1;
+	uint32_t cr_delay:6;
+	uint32_t cr_count:8;
+	uint32_t InitBBC:8;
+	uint32_t MaxBBC:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t MaxBBC:8;
+	uint32_t InitBBC:8;
+	uint32_t cr_count:8;
+	uint32_t cr_delay:6;
+	uint32_t ci:1;
+	uint32_t cr:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t topology:8;
+	uint32_t myDid:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t myDid:24;
+	uint32_t topology:8;
+#endif
+
+	/* Defines for topology (defined previously) */
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t AR:1;
+	uint32_t IR:1;
+	uint32_t rsvd1:29;
+	uint32_t ack0:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ack0:1;
+	uint32_t rsvd1:29;
+	uint32_t IR:1;
+	uint32_t AR:1;
+#endif
+
+	uint32_t edtov;
+	uint32_t arbtov;
+	uint32_t ratov;
+	uint32_t rttov;
+	uint32_t altov;
+	uint32_t lmt;
+#define LMT_RESERVED  0x000    /* Not used */
+#define LMT_1Gb       0x004
+#define LMT_2Gb       0x008
+#define LMT_4Gb       0x040
+#define LMT_8Gb       0x080
+#define LMT_10Gb      0x100
+#define LMT_16Gb      0x200
+#define LMT_32Gb      0x400
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t max_xri;
+	uint32_t max_iocb;
+	uint32_t max_rpi;
+	uint32_t avail_xri;
+	uint32_t avail_iocb;
+	uint32_t avail_rpi;
+	uint32_t max_vpi;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+	uint32_t avail_vpi;
+} READ_CONFIG_VAR;
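+
+/* Illustrative sketch: lmt is a bit mask of supported link speeds, so it
+ * is tested with bitwise AND rather than equality, e.g.:
+ *
+ *	READ_CONFIG_VAR *rdconf = &mbox->un.varRdConfig;
+ *	if (rdconf->lmt & LMT_8Gb)
+ *		... the port supports an 8Gb link ...
+ */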
+
+/* Structure for MB Command READ_RCONFIG (12) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd2:7;
+	uint32_t recvNotify:1;
+	uint32_t numMask:8;
+	uint32_t profile:8;
+	uint32_t rsvd1:4;
+	uint32_t ring:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ring:4;
+	uint32_t rsvd1:4;
+	uint32_t profile:8;
+	uint32_t numMask:8;
+	uint32_t recvNotify:1;
+	uint32_t rsvd2:7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t maxResp;
+	uint16_t maxOrig;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t maxOrig;
+	uint16_t maxResp;
+#endif
+
+	RR_REG rrRegs[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t cmdRingOffset;
+	uint16_t cmdEntryCnt;
+	uint16_t rspRingOffset;
+	uint16_t rspEntryCnt;
+	uint16_t nextCmdOffset;
+	uint16_t rsvd3;
+	uint16_t nextRspOffset;
+	uint16_t rsvd4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t cmdEntryCnt;
+	uint16_t cmdRingOffset;
+	uint16_t rspEntryCnt;
+	uint16_t rspRingOffset;
+	uint16_t rsvd3;
+	uint16_t nextCmdOffset;
+	uint16_t rsvd4;
+	uint16_t nextRspOffset;
+#endif
+} READ_RCONF_VAR;
+
+/* Structure for MB Command READ_SPARM (13) */
+/* Structure for MB Command READ_SPARM64 (0x8D) */
+
+typedef struct {
+	uint32_t rsvd1;
+	uint32_t rsvd2;
+	union {
+		struct ulp_bde sp; /* This BDE points to the serv_parm
+				      structure */
+		struct ulp_bde64 sp64;
+	} un;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd3;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t vpi;
+	uint16_t rsvd3;
+#endif
+} READ_SPARM_VAR;
+
+/* Structure for MB Command READ_STATUS (14) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1:31;
+	uint32_t clrCounters:1;
+	uint16_t activeXriCnt;
+	uint16_t activeRpiCnt;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t clrCounters:1;
+	uint32_t rsvd1:31;
+	uint16_t activeRpiCnt;
+	uint16_t activeXriCnt;
+#endif
+
+	uint32_t xmitByteCnt;
+	uint32_t rcvByteCnt;
+	uint32_t xmitFrameCnt;
+	uint32_t rcvFrameCnt;
+	uint32_t xmitSeqCnt;
+	uint32_t rcvSeqCnt;
+	uint32_t totalOrigExchanges;
+	uint32_t totalRespExchanges;
+	uint32_t rcvPbsyCnt;
+	uint32_t rcvFbsyCnt;
+} READ_STATUS_VAR;
+
+/* Structure for MB Command READ_RPI (15) */
+/* Structure for MB Command READ_RPI64 (0x8F) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t nextRpi;
+	uint16_t reqRpi;
+	uint32_t rsvd2:8;
+	uint32_t DID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t reqRpi;
+	uint16_t nextRpi;
+	uint32_t DID:24;
+	uint32_t rsvd2:8;
+#endif
+
+	union {
+		struct ulp_bde sp;
+		struct ulp_bde64 sp64;
+	} un;
+
+} READ_RPI_VAR;
+
+/* Structure for MB Command READ_XRI (16) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t nextXri;
+	uint16_t reqXri;
+	uint16_t rsvd1;
+	uint16_t rpi;
+	uint32_t rsvd2:8;
+	uint32_t DID:24;
+	uint32_t rsvd3:8;
+	uint32_t SID:24;
+	uint32_t rsvd4;
+	uint8_t seqId;
+	uint8_t rsvd5;
+	uint16_t seqCount;
+	uint16_t oxId;
+	uint16_t rxId;
+	uint32_t rsvd6:30;
+	uint32_t si:1;
+	uint32_t exchOrig:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t reqXri;
+	uint16_t nextXri;
+	uint16_t rpi;
+	uint16_t rsvd1;
+	uint32_t DID:24;
+	uint32_t rsvd2:8;
+	uint32_t SID:24;
+	uint32_t rsvd3:8;
+	uint32_t rsvd4;
+	uint16_t seqCount;
+	uint8_t rsvd5;
+	uint8_t seqId;
+	uint16_t rxId;
+	uint16_t oxId;
+	uint32_t exchOrig:1;
+	uint32_t si:1;
+	uint32_t rsvd6:30;
+#endif
+} READ_XRI_VAR;
+
+/* Structure for MB Command READ_REV (17) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cv:1;
+	uint32_t rr:1;
+	uint32_t rsvd2:2;
+	uint32_t v3req:1;
+	uint32_t v3rsp:1;
+	uint32_t rsvd1:25;
+	uint32_t rv:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rv:1;
+	uint32_t rsvd1:25;
+	uint32_t v3rsp:1;
+	uint32_t v3req:1;
+	uint32_t rsvd2:2;
+	uint32_t rr:1;
+	uint32_t cv:1;
+#endif
+
+	uint32_t biuRev;
+	uint32_t smRev;
+	union {
+		uint32_t smFwRev;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t ProgType;
+			uint8_t ProgId;
+			uint16_t ProgVer:4;
+			uint16_t ProgRev:4;
+			uint16_t ProgFixLvl:2;
+			uint16_t ProgDistType:2;
+			uint16_t DistCnt:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint16_t DistCnt:4;
+			uint16_t ProgDistType:2;
+			uint16_t ProgFixLvl:2;
+			uint16_t ProgRev:4;
+			uint16_t ProgVer:4;
+			uint8_t ProgId;
+			uint8_t ProgType;
+#endif
+
+		} b;
+	} un;
+	uint32_t endecRev;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t feaLevelHigh;
+	uint8_t feaLevelLow;
+	uint8_t fcphHigh;
+	uint8_t fcphLow;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t fcphLow;
+	uint8_t fcphHigh;
+	uint8_t feaLevelLow;
+	uint8_t feaLevelHigh;
+#endif
+
+	uint32_t postKernRev;
+	uint32_t opFwRev;
+	uint8_t opFwName[16];
+	uint32_t sli1FwRev;
+	uint8_t sli1FwName[16];
+	uint32_t sli2FwRev;
+	uint8_t sli2FwName[16];
+	uint32_t sli3Feat;
+	uint32_t RandomData[6];
+} READ_REV_VAR;
+
+/* Structure for MB Command READ_LINK_STAT (18) */
+
+typedef struct {
+	uint32_t word0;
+
+#define lpfc_read_link_stat_rec_SHIFT   0
+#define lpfc_read_link_stat_rec_MASK   0x1
+#define lpfc_read_link_stat_rec_WORD   word0
+
+#define lpfc_read_link_stat_gec_SHIFT	1
+#define lpfc_read_link_stat_gec_MASK   0x1
+#define lpfc_read_link_stat_gec_WORD   word0
+
+#define lpfc_read_link_stat_w02oftow23of_SHIFT	2
+#define lpfc_read_link_stat_w02oftow23of_MASK   0x3FFFFF
+#define lpfc_read_link_stat_w02oftow23of_WORD   word0
+
+#define lpfc_read_link_stat_rsvd_SHIFT	24
+#define lpfc_read_link_stat_rsvd_MASK   0x1F
+#define lpfc_read_link_stat_rsvd_WORD   word0
+
+#define lpfc_read_link_stat_gec2_SHIFT  29
+#define lpfc_read_link_stat_gec2_MASK   0x1
+#define lpfc_read_link_stat_gec2_WORD   word0
+
+#define lpfc_read_link_stat_clrc_SHIFT  30
+#define lpfc_read_link_stat_clrc_MASK   0x1
+#define lpfc_read_link_stat_clrc_WORD   word0
+
+#define lpfc_read_link_stat_clof_SHIFT  31
+#define lpfc_read_link_stat_clof_MASK   0x1
+#define lpfc_read_link_stat_clof_WORD   word0
+
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+	uint32_t primSeqTimeout;
+	uint32_t elasticOverrun;
+	uint32_t arbTimeout;
+	uint32_t advRecBufCredit;
+	uint32_t curRecBufCredit;
+	uint32_t advTransBufCredit;
+	uint32_t curTransBufCredit;
+	uint32_t recEofCount;
+	uint32_t recEofdtiCount;
+	uint32_t recEofniCount;
+	uint32_t recSofcount;
+	uint32_t rsvd1;
+	uint32_t rsvd2;
+	uint32_t recDrpXriCount;
+	uint32_t fecCorrBlkCount;
+	uint32_t fecUncorrBlkCount;
+} READ_LNK_VAR;
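+
+/* Illustrative sketch: the word0 _SHIFT/_MASK/_WORD triplets above are
+ * consumed by the bf_get()/bf_set() macros from lpfc_hw4.h, e.g. asking
+ * the port to clear the counters as part of the read:
+ *
+ *	READ_LNK_VAR *rdl = &mbox->un.varRdLnk;
+ *	bf_set(lpfc_read_link_stat_clrc, rdl, 1);
+ */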
+
+/* Structure for MB Command REG_LOGIN (19) */
+/* Structure for MB Command REG_LOGIN64 (0x93) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd1;
+	uint16_t rpi;
+	uint32_t rsvd2:8;
+	uint32_t did:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t rpi;
+	uint16_t rsvd1;
+	uint32_t did:24;
+	uint32_t rsvd2:8;
+#endif
+
+	union {
+		struct ulp_bde sp;
+		struct ulp_bde64 sp64;
+	} un;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+
+} REG_LOGIN_VAR;
+
+/* Word 30 contents for REG_LOGIN */
+typedef union {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint16_t rsvd1:12;
+		uint16_t wd30_class:4;
+		uint16_t xri;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+		uint16_t xri;
+		uint16_t wd30_class:4;
+		uint16_t rsvd1:12;
+#endif
+	} f;
+	uint32_t word;
+} REG_WD30;
+
+/* Structure for MB Command UNREG_LOGIN (20) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd1;
+	uint16_t rpi;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t rpi;
+	uint16_t rsvd1;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+} UNREG_LOGIN_VAR;
+
+/* Structure for MB Command REG_VPI (0x96) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1;
+	uint32_t rsvd2:7;
+	uint32_t upd:1;
+	uint32_t sid:24;
+	uint32_t wwn[2];
+	uint32_t rsvd5;
+	uint16_t vfi;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t rsvd1;
+	uint32_t sid:24;
+	uint32_t upd:1;
+	uint32_t rsvd2:7;
+	uint32_t wwn[2];
+	uint32_t rsvd5;
+	uint16_t vpi;
+	uint16_t vfi;
+#endif
+} REG_VPI_VAR;
+
+/* Structure for MB Command UNREG_VPI (0x97) */
+typedef struct {
+	uint32_t rsvd1;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd2;
+	uint16_t sli4_vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint16_t sli4_vpi;
+	uint16_t rsvd2;
+#endif
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+} UNREG_VPI_VAR;
+
+/* Structure for MB Command UNREG_D_ID (0x23) */
+
+typedef struct {
+	uint32_t did;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+} UNREG_D_ID_VAR;
+
+/* Structure for MB Command READ_TOPOLOGY (0x95) */
+struct lpfc_mbx_read_top {
+	uint32_t eventTag;	/* Event tag */
+	uint32_t word2;
+#define lpfc_mbx_read_top_fa_SHIFT		12
+#define lpfc_mbx_read_top_fa_MASK		0x00000001
+#define lpfc_mbx_read_top_fa_WORD		word2
+#define lpfc_mbx_read_top_mm_SHIFT		11
+#define lpfc_mbx_read_top_mm_MASK		0x00000001
+#define lpfc_mbx_read_top_mm_WORD		word2
+#define lpfc_mbx_read_top_pb_SHIFT		9
+#define lpfc_mbx_read_top_pb_MASK		0x00000001
+#define lpfc_mbx_read_top_pb_WORD		word2
+#define lpfc_mbx_read_top_il_SHIFT		8
+#define lpfc_mbx_read_top_il_MASK		0x00000001
+#define lpfc_mbx_read_top_il_WORD		word2
+#define lpfc_mbx_read_top_att_type_SHIFT	0
+#define lpfc_mbx_read_top_att_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_att_type_WORD		word2
+#define LPFC_ATT_RESERVED    0x00	/* Reserved - attType */
+#define LPFC_ATT_LINK_UP     0x01	/* Link is up */
+#define LPFC_ATT_LINK_DOWN   0x02	/* Link is down */
+#define LPFC_ATT_UNEXP_WWPN  0x06	/* Link is down, unexpected WWPN */
+	uint32_t word3;
+#define lpfc_mbx_read_top_alpa_granted_SHIFT	24
+#define lpfc_mbx_read_top_alpa_granted_MASK	0x000000FF
+#define lpfc_mbx_read_top_alpa_granted_WORD	word3
+#define lpfc_mbx_read_top_lip_alps_SHIFT	16
+#define lpfc_mbx_read_top_lip_alps_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_alps_WORD		word3
+#define lpfc_mbx_read_top_lip_type_SHIFT	8
+#define lpfc_mbx_read_top_lip_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_type_WORD		word3
+#define lpfc_mbx_read_top_topology_SHIFT	0
+#define lpfc_mbx_read_top_topology_MASK		0x000000FF
+#define lpfc_mbx_read_top_topology_WORD		word3
+#define LPFC_TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
+#define LPFC_TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
+#define LPFC_TOPOLOGY_MM    0x05	/* maint mode: zephyr to menlo */
+	/* store the LILP AL_PA position map into */
+	struct ulp_bde64 lilpBde64;
+#define LPFC_ALPA_MAP_SIZE	128
+	uint32_t word7;
+#define lpfc_mbx_read_top_ld_lu_SHIFT		31
+#define lpfc_mbx_read_top_ld_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_lu_WORD		word7
+#define lpfc_mbx_read_top_ld_tf_SHIFT		30
+#define lpfc_mbx_read_top_ld_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_tf_WORD		word7
+#define lpfc_mbx_read_top_ld_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_ld_link_spd_MASK	0x000000FF
+#define lpfc_mbx_read_top_ld_link_spd_WORD	word7
+#define lpfc_mbx_read_top_ld_nl_port_SHIFT	4
+#define lpfc_mbx_read_top_ld_nl_port_MASK	0x0000000F
+#define lpfc_mbx_read_top_ld_nl_port_WORD	word7
+#define lpfc_mbx_read_top_ld_tx_SHIFT		2
+#define lpfc_mbx_read_top_ld_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_tx_WORD		word7
+#define lpfc_mbx_read_top_ld_rx_SHIFT		0
+#define lpfc_mbx_read_top_ld_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_rx_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_top_lu_SHIFT		31
+#define lpfc_mbx_read_top_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_lu_WORD		word8
+#define lpfc_mbx_read_top_tf_SHIFT		30
+#define lpfc_mbx_read_top_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_tf_WORD		word8
+#define lpfc_mbx_read_top_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_link_spd_MASK		0x000000FF
+#define lpfc_mbx_read_top_link_spd_WORD		word8
+#define lpfc_mbx_read_top_nl_port_SHIFT		4
+#define lpfc_mbx_read_top_nl_port_MASK		0x0000000F
+#define lpfc_mbx_read_top_nl_port_WORD		word8
+#define lpfc_mbx_read_top_tx_SHIFT		2
+#define lpfc_mbx_read_top_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_tx_WORD		word8
+#define lpfc_mbx_read_top_rx_SHIFT		0
+#define lpfc_mbx_read_top_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_rx_WORD		word8
+#define LPFC_LINK_SPEED_UNKNOWN	0x0
+#define LPFC_LINK_SPEED_1GHZ	0x04
+#define LPFC_LINK_SPEED_2GHZ	0x08
+#define LPFC_LINK_SPEED_4GHZ	0x10
+#define LPFC_LINK_SPEED_8GHZ	0x20
+#define LPFC_LINK_SPEED_10GHZ	0x40
+#define LPFC_LINK_SPEED_16GHZ	0x80
+#define LPFC_LINK_SPEED_32GHZ	0x90
+};
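+
+/* Illustrative sketch: attention type and topology are extracted from a
+ * completed READ_TOPOLOGY mailbox with bf_get(), e.g.:
+ *
+ *	struct lpfc_mbx_read_top *la = &mbox->un.varReadTop;
+ *	if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP &&
+ *	    bf_get(lpfc_mbx_read_top_topology, la) == LPFC_TOPOLOGY_LOOP)
+ *		... the link came up in loop (FC-AL) topology ...
+ */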
+
+/* Structure for MB Command CLEAR_LA (22) */
+
+typedef struct {
+	uint32_t eventTag;	/* Event tag */
+	uint32_t rsvd1;
+} CLEAR_LA_VAR;
+
+/* Structure for MB Command DUMP */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd:25;
+	uint32_t ra:1;
+	uint32_t co:1;
+	uint32_t cv:1;
+	uint32_t type:4;
+	uint32_t entry_index:16;
+	uint32_t region_id:16;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t type:4;
+	uint32_t cv:1;
+	uint32_t co:1;
+	uint32_t ra:1;
+	uint32_t rsvd:25;
+	uint32_t region_id:16;
+	uint32_t entry_index:16;
+#endif
+
+	uint32_t sli4_length;
+	uint32_t word_cnt;
+	uint32_t resp_offset;
+} DUMP_VAR;
+
+#define  DMP_MEM_REG             0x1
+#define  DMP_NV_PARAMS           0x2
+#define  DMP_LMSD                0x3 /* Link Module Serial Data */
+#define  DMP_WELL_KNOWN          0x4
+
+#define  DMP_REGION_VPD          0xe
+#define  DMP_VPD_SIZE            0x400  /* maximum amount of VPD */
+#define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
+#define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */
+
+#define  DMP_REGION_VPORT	 0x16   /* VPort info region */
+#define  DMP_VPORT_REGION_SIZE	 0x200
+#define  DMP_MBOX_OFFSET_WORD	 0x5
+
+#define  DMP_REGION_23		 0x17   /* fcoe param  and port state region */
+#define  DMP_RGN23_SIZE		 0x400
+
+#define  WAKE_UP_PARMS_REGION_ID    4
+#define  WAKE_UP_PARMS_WORD_SIZE   15
+
+struct vport_rec {
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+	uint32_t		signature;
+	uint32_t		rev;
+	struct vport_rec	vport_list[MAX_STATIC_VPORT_COUNT];
+	uint32_t		resvd[66];
+};
+
+/* Option rom version structure */
+struct prog_id {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t  type;
+	uint8_t  id;
+	uint32_t ver:4;  /* Major Version */
+	uint32_t rev:4;  /* Revision */
+	uint32_t lev:2;  /* Level */
+	uint32_t dist:2; /* Dist Type */
+	uint32_t num:4;  /* number after dist type */
+#else /*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t num:4;  /* number after dist type */
+	uint32_t dist:2; /* Dist Type */
+	uint32_t lev:2;  /* Level */
+	uint32_t rev:4;  /* Revision */
+	uint32_t ver:4;  /* Major Version */
+	uint8_t  id;
+	uint8_t  type;
+#endif
+};
+
+/* Structure for MB Command UPDATE_CFG (0x1B) */
+
+struct update_cfg_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd2:16;
+	uint32_t type:8;
+	uint32_t rsvd:1;
+	uint32_t ra:1;
+	uint32_t co:1;
+	uint32_t cv:1;
+	uint32_t req:4;
+	uint32_t entry_length:16;
+	uint32_t region_id:16;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t req:4;
+	uint32_t cv:1;
+	uint32_t co:1;
+	uint32_t ra:1;
+	uint32_t rsvd:1;
+	uint32_t type:8;
+	uint32_t rsvd2:16;
+	uint32_t region_id:16;
+	uint32_t entry_length:16;
+#endif
+
+	uint32_t resp_info;
+	uint32_t byte_cnt;
+	uint32_t data_offset;
+};
+
+struct hbq_mask {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t tmatch;
+	uint8_t tmask;
+	uint8_t rctlmatch;
+	uint8_t rctlmask;
+#else	/*  __LITTLE_ENDIAN */
+	uint8_t rctlmask;
+	uint8_t rctlmatch;
+	uint8_t tmask;
+	uint8_t tmatch;
+#endif
+};
+
+
+/* Structure for MB Command CONFIG_HBQ (7c) */
+
+struct config_hbq_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1      :7;
+	uint32_t recvNotify :1;     /* Receive Notification */
+	uint32_t numMask    :8;     /* # Mask Entries       */
+	uint32_t profile    :8;     /* Selection Profile    */
+	uint32_t rsvd2      :8;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t rsvd2      :8;
+	uint32_t profile    :8;     /* Selection Profile    */
+	uint32_t numMask    :8;     /* # Mask Entries       */
+	uint32_t recvNotify :1;     /* Receive Notification */
+	uint32_t rsvd1      :7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t hbqId      :16;
+	uint32_t rsvd3      :12;
+	uint32_t ringMask   :4;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t ringMask   :4;
+	uint32_t rsvd3      :12;
+	uint32_t hbqId      :16;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t entry_count :16;
+	uint32_t rsvd4        :8;
+	uint32_t headerLen    :8;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t headerLen    :8;
+	uint32_t rsvd4        :8;
+	uint32_t entry_count :16;
+#endif
+
+	uint32_t hbqaddrLow;
+	uint32_t hbqaddrHigh;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd5      :31;
+	uint32_t logEntry   :1;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t logEntry   :1;
+	uint32_t rsvd5      :31;
+#endif
+
+	uint32_t rsvd6;    /* w7 */
+	uint32_t rsvd7;    /* w8 */
+	uint32_t rsvd8;    /* w9 */
+
+	struct hbq_mask hbqMasks[6];
+
+
+	union {
+		uint32_t allprofiles[12];
+
+		struct {
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	seqlenoff	:16;
+				uint32_t	maxlen		:16;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	maxlen		:16;
+				uint32_t	seqlenoff	:16;
+			#endif
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	rsvd1		:28;
+				uint32_t	seqlenbcnt	:4;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	seqlenbcnt	:4;
+				uint32_t	rsvd1		:28;
+			#endif
+			uint32_t rsvd[10];
+		} profile2;
+
+		struct {
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	seqlenoff	:16;
+				uint32_t	maxlen		:16;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	maxlen		:16;
+				uint32_t	seqlenoff	:16;
+			#endif
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	cmdcodeoff	:28;
+				uint32_t	rsvd1		:12;
+				uint32_t	seqlenbcnt	:4;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	seqlenbcnt	:4;
+				uint32_t	rsvd1		:12;
+				uint32_t	cmdcodeoff	:28;
+			#endif
+			uint32_t cmdmatch[8];
+
+			uint32_t rsvd[2];
+		} profile3;
+
+		struct {
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	seqlenoff	:16;
+				uint32_t	maxlen		:16;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	maxlen		:16;
+				uint32_t	seqlenoff	:16;
+			#endif
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	cmdcodeoff	:28;
+				uint32_t	rsvd1		:12;
+				uint32_t	seqlenbcnt	:4;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	seqlenbcnt	:4;
+				uint32_t	rsvd1		:12;
+				uint32_t	cmdcodeoff	:28;
+			#endif
+			uint32_t cmdmatch[8];
+
+			uint32_t rsvd[2];
+		} profile5;
+
+	} profiles;
+
+};
+
+
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cBE       :  1;
+	uint32_t cET       :  1;
+	uint32_t cHpcb     :  1;
+	uint32_t cMA       :  1;
+	uint32_t sli_mode  :  4;
+	uint32_t pcbLen    : 24;       /* bit 23:0  of memory based port
+					* config block */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t pcbLen    : 24;       /* bit 23:0  of memory based port
+					* config block */
+	uint32_t sli_mode  :  4;
+	uint32_t cMA       :  1;
+	uint32_t cHpcb     :  1;
+	uint32_t cET       :  1;
+	uint32_t cBE       :  1;
+#endif
+
+	uint32_t pcbLow;       /* bit 31:0  of memory based port config block */
+	uint32_t pcbHigh;      /* bit 63:32 of memory based port config block */
+	uint32_t hbainit[5];
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t hps	   :  1; /* bit 31 word9 Host Pointer in slim */
+	uint32_t rsvd	   : 31; /* least significant 31 bits of word 9 */
+#else   /*  __LITTLE_ENDIAN */
+	uint32_t rsvd      : 31; /* least significant 31 bits of word 9 */
+	uint32_t hps	   :  1; /* bit 31 word9 Host Pointer in slim */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1     : 19;  /* Reserved                             */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t casabt    :  1;  /* Configure async abts status notice   */
+	uint32_t rsvd2     :  2;  /* Reserved                             */
+	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
+	uint32_t cmv       :  1;  /* Configure Max VPIs                   */
+	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
+	uint32_t csah      :  1;  /* Configure Synchronous Abort Handling */
+	uint32_t chbs      :  1;  /* Configure Host Backing Store         */
+	uint32_t cinb      :  1;  /* Enable Interrupt Notification Block  */
+	uint32_t cerbm	   :  1;  /* Configure Enhanced Receive Buf Mgmt  */
+	uint32_t cmx	   :  1;  /* Configure Max XRIs                   */
+	uint32_t cmr	   :  1;  /* Configure Max RPIs                   */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t cmr	   :  1;  /* Configure Max RPIs                   */
+	uint32_t cmx	   :  1;  /* Configure Max XRIs                   */
+	uint32_t cerbm	   :  1;  /* Configure Enhanced Receive Buf Mgmt  */
+	uint32_t cinb      :  1;  /* Enable Interrupt Notification Block  */
+	uint32_t chbs      :  1;  /* Configure Host Backing Store         */
+	uint32_t csah      :  1;  /* Configure Synchronous Abort Handling */
+	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
+	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
+	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
+	uint32_t rsvd2     :  2;  /* Reserved                             */
+	uint32_t casabt    :  1;  /* Configure async abts status notice   */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd1     : 19;  /* Reserved                             */
+#endif
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd3     : 19;  /* Reserved                             */
+	uint32_t gdss      :  1;  /* Grant Data Security SLI              */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice       */
+	uint32_t rsvd4     :  2;  /* Reserved                             */
+	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
+	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
+	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
+	uint32_t gsah	   :  1;  /* Grant Synchronous Abort Handling     */
+	uint32_t ghbs	   :  1;  /* Grant Host Backing Store             */
+	uint32_t ginb	   :  1;  /* Grant Interrupt Notification Block   */
+	uint32_t gerbm	   :  1;  /* Grant ERBM Request                   */
+	uint32_t gmx	   :  1;  /* Grant Max XRIs                       */
+	uint32_t gmr	   :  1;  /* Grant Max RPIs                       */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t gmr	   :  1;  /* Grant Max RPIs                       */
+	uint32_t gmx	   :  1;  /* Grant Max XRIs                       */
+	uint32_t gerbm	   :  1;  /* Grant ERBM Request                   */
+	uint32_t ginb	   :  1;  /* Grant Interrupt Notification Block   */
+	uint32_t ghbs	   :  1;  /* Grant Host Backing Store             */
+	uint32_t gsah	   :  1;  /* Grant Synchronous Abort Handling     */
+	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
+	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
+	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
+	uint32_t rsvd4     :  2;  /* Reserved                             */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice       */
+	uint32_t gdss      :  1;  /* Grant Data Security SLI              */
+	uint32_t rsvd3     : 19;  /* Reserved                             */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t max_rpi   : 16;  /* Max RPIs Port should configure       */
+	uint32_t max_xri   : 16;  /* Max XRIs Port should configure       */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t max_xri   : 16;  /* Max XRIs Port should configure       */
+	uint32_t max_rpi   : 16;  /* Max RPIs Port should configure       */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t max_hbq   : 16;  /* Max HBQs the host expects to configure */
+	uint32_t rsvd5     : 16;  /* Reserved                             */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t rsvd5     : 16;  /* Reserved                             */
+	uint32_t max_hbq   : 16;  /* Max HBQs the host expects to configure */
+#endif
+
+	uint32_t rsvd6;           /* Reserved                             */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t fips_rev   : 3;   /* FIPS Spec Revision                   */
+	uint32_t fips_level : 4;   /* FIPS Level                           */
+	uint32_t sec_err    : 9;   /* security crypto error                */
+	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
+	uint32_t sec_err    : 9;   /* security crypto error                */
+	uint32_t fips_level : 4;   /* FIPS Level                           */
+	uint32_t fips_rev   : 3;   /* FIPS Spec Revision                   */
+#endif
+
+} CONFIG_PORT_VAR;
+
+/* Structure for MB Command CONFIG_MSI (0x30) */
+struct config_msi_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t dfltMsgNum:8;	/* Default message number            */
+	uint32_t rsvd1:11;	/* Reserved                          */
+	uint32_t NID:5;		/* Number of secondary attention IDs */
+	uint32_t rsvd2:5;	/* Reserved                          */
+	uint32_t dfltPresent:1;	/* Default message number present    */
+	uint32_t addFlag:1;	/* Add association flag              */
+	uint32_t reportFlag:1;	/* Report association flag           */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t reportFlag:1;	/* Report association flag           */
+	uint32_t addFlag:1;	/* Add association flag              */
+	uint32_t dfltPresent:1;	/* Default message number present    */
+	uint32_t rsvd2:5;	/* Reserved                          */
+	uint32_t NID:5;		/* Number of secondary attention IDs */
+	uint32_t rsvd1:11;	/* Reserved                          */
+	uint32_t dfltMsgNum:8;	/* Default message number            */
+#endif
+	uint32_t attentionConditions[2];
+	uint8_t  attentionId[16];
+	uint8_t  messageNumberByHA[64];
+	uint8_t  messageNumberByID[16];
+	uint32_t autoClearHA[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd3:16;
+	uint32_t autoClearID:16;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t autoClearID:16;
+	uint32_t rsvd3:16;
+#endif
+	uint32_t rsvd4;
+};
+
+/* SLI-2 Port Control Block */
+
+/* SLIM POINTER */
+#define SLIMOFF 0x30		/* WORD */
+
+typedef struct _SLI2_RDSC {
+	uint32_t cmdEntries;
+	uint32_t cmdAddrLow;
+	uint32_t cmdAddrHigh;
+
+	uint32_t rspEntries;
+	uint32_t rspAddrLow;
+	uint32_t rspAddrHigh;
+} SLI2_RDSC;
+
+typedef struct _PCB {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t type:8;
+#define TYPE_NATIVE_SLI2       0x01
+	uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2   0x01
+	uint32_t rsvd:12;
+	uint32_t maxRing:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t maxRing:4;
+	uint32_t rsvd:12;
+	uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2   0x01
+	uint32_t type:8;
+#define TYPE_NATIVE_SLI2       0x01
+#endif
+
+	uint32_t mailBoxSize;
+	uint32_t mbAddrLow;
+	uint32_t mbAddrHigh;
+
+	uint32_t hgpAddrLow;
+	uint32_t hgpAddrHigh;
+
+	uint32_t pgpAddrLow;
+	uint32_t pgpAddrHigh;
+	SLI2_RDSC rdsc[MAX_SLI3_RINGS];
+} PCB_t;
+
+/* NEW_FEATURE */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd0:27;
+	uint32_t discardFarp:1;
+	uint32_t IPEnable:1;
+	uint32_t nodeName:1;
+	uint32_t portName:1;
+	uint32_t filterEnable:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t filterEnable:1;
+	uint32_t portName:1;
+	uint32_t nodeName:1;
+	uint32_t IPEnable:1;
+	uint32_t discardFarp:1;
+	uint32_t rsvd0:27;
+#endif
+
+	uint8_t portname[8];	/* Used to be struct lpfc_name */
+	uint8_t nodename[8];
+	uint32_t rsvd1;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t IPAddress;
+} CONFIG_FARP_VAR;
+
+/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd:30;
+	uint32_t ring:2;	/* Ring for ASYNC_EVENT iocb Bits 0-1*/
+#else /*  __LITTLE_ENDIAN */
+	uint32_t ring:2;	/* Ring for ASYNC_EVENT iocb Bits 0-1*/
+	uint32_t rsvd:30;
+#endif
+} ASYNCEVT_ENABLE_VAR;
+
+/* Union of all Mailbox Command types */
+#define MAILBOX_CMD_WSIZE	32
+#define MAILBOX_CMD_SIZE	(MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+/* ext_wsize times 4 bytes should not be greater than max xmit size */
+#define MAILBOX_EXT_WSIZE	512
+#define MAILBOX_EXT_SIZE	(MAILBOX_EXT_WSIZE * sizeof(uint32_t))
+#define MAILBOX_HBA_EXT_OFFSET  0x100
+/* max mbox xmit size is a page size for sysfs IO operations */
+#define MAILBOX_SYSFS_MAX	4096
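+
+/* Worked out, the sizes above are: MAILBOX_CMD_SIZE = 32 words * 4 bytes
+ * = 128 bytes, and MAILBOX_EXT_SIZE = 512 words * 4 bytes = 2048 bytes,
+ * both below the 4096-byte sysfs transfer limit.
+ */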
+
+typedef union {
+	uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
+						    * feature/max ring number
+						    */
+	LOAD_SM_VAR varLdSM;		/* cmd =  1 (LOAD_SM)        */
+	READ_NV_VAR varRDnvp;		/* cmd =  2 (READ_NVPARMS)   */
+	WRITE_NV_VAR varWTnvp;		/* cmd =  3 (WRITE_NVPARMS)  */
+	BIU_DIAG_VAR varBIUdiag;	/* cmd =  4 (RUN_BIU_DIAG)   */
+	INIT_LINK_VAR varInitLnk;	/* cmd =  5 (INIT_LINK)      */
+	DOWN_LINK_VAR varDwnLnk;	/* cmd =  6 (DOWN_LINK)      */
+	CONFIG_LINK varCfgLnk;		/* cmd =  7 (CONFIG_LINK)    */
+	PART_SLIM_VAR varSlim;		/* cmd =  8 (PART_SLIM)      */
+	CONFIG_RING_VAR varCfgRing;	/* cmd =  9 (CONFIG_RING)    */
+	RESET_RING_VAR varRstRing;	/* cmd = 10 (RESET_RING)     */
+	READ_CONFIG_VAR varRdConfig;	/* cmd = 11 (READ_CONFIG)    */
+	READ_RCONF_VAR varRdRConfig;	/* cmd = 12 (READ_RCONFIG)   */
+	READ_SPARM_VAR varRdSparm;	/* cmd = 13 (READ_SPARM(64)) */
+	READ_STATUS_VAR varRdStatus;	/* cmd = 14 (READ_STATUS)    */
+	READ_RPI_VAR varRdRPI;		/* cmd = 15 (READ_RPI(64))   */
+	READ_XRI_VAR varRdXRI;		/* cmd = 16 (READ_XRI)       */
+	READ_REV_VAR varRdRev;		/* cmd = 17 (READ_REV)       */
+	READ_LNK_VAR varRdLnk;		/* cmd = 18 (READ_LNK_STAT)  */
+	REG_LOGIN_VAR varRegLogin;	/* cmd = 19 (REG_LOGIN(64))  */
+	UNREG_LOGIN_VAR varUnregLogin;	/* cmd = 20 (UNREG_LOGIN)    */
+	CLEAR_LA_VAR varClearLA;	/* cmd = 22 (CLEAR_LA)       */
+	DUMP_VAR varDmp;		/* Warm Start DUMP mbx cmd   */
+	UNREG_D_ID_VAR varUnregDID;	/* cmd = 0x23 (UNREG_D_ID)   */
+	CONFIG_FARP_VAR varCfgFarp;	/* cmd = 0x25 (CONFIG_FARP)
+					 * NEW_FEATURE
+					 */
+	struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ)  */
+	struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
+	CONFIG_PORT_VAR varCfgPort;	/* cmd = 0x88 (CONFIG_PORT)  */
+	struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */
+	REG_VPI_VAR varRegVpi;		/* cmd = 0x96 (REG_VPI) */
+	UNREG_VPI_VAR varUnregVpi;	/* cmd = 0x97 (UNREG_VPI) */
+	ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /* cmd = 0x33 (CONFIG_ASYNC) */
+	struct READ_EVENT_LOG_VAR varRdEventLog;	/* cmd = 0x38
+							 * (READ_EVENT_LOG)
+							 */
+	struct config_msi_var varCfgMSI;/* cmd = 0x30 (CONFIG_MSI)    */
+} MAILVARIANTS;
+
+/*
+ * SLI-2 specific structures
+ */
+
+struct lpfc_hgp {
+	__le32 cmdPutInx;
+	__le32 rspGetInx;
+};
+
+struct lpfc_pgp {
+	__le32 cmdGetInx;
+	__le32 rspPutInx;
+};
+
+struct sli2_desc {
+	uint32_t unused1[16];
+	struct lpfc_hgp host[MAX_SLI3_RINGS];
+	struct lpfc_pgp port[MAX_SLI3_RINGS];
+};
+
+struct sli3_desc {
+	struct lpfc_hgp host[MAX_SLI3_RINGS];
+	uint32_t reserved[8];
+	uint32_t hbq_put[16];
+};
+
+struct sli3_pgp {
+	struct lpfc_pgp port[MAX_SLI3_RINGS];
+	uint32_t hbq_get[16];
+};
+
+union sli_var {
+	struct sli2_desc	s2;
+	struct sli3_desc	s3;
+	struct sli3_pgp		s3_pgp;
+};
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t mbxStatus;
+	uint8_t mbxCommand;
+	uint8_t mbxReserved:6;
+	uint8_t mbxHc:1;
+	uint8_t mbxOwner:1;	/* Low order bit first word */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t mbxOwner:1;	/* Low order bit first word */
+	uint8_t mbxHc:1;
+	uint8_t mbxReserved:6;
+	uint8_t mbxCommand;
+	uint16_t mbxStatus;
+#endif
+
+	MAILVARIANTS un;
+	union sli_var us;
+} MAILBOX_t;
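+
+/* Illustrative sketch: issuing a mailbox command amounts to picking a
+ * command code and filling in the matching MAILVARIANTS member, e.g. for
+ * READ_CONFIG (command 11; the opcode define appears earlier in this
+ * header), on a zeroed MAILBOX_t *mb:
+ *
+ *	mb->mbxCommand = MBX_READ_CONFIG;
+ *	... issue the command and wait for completion ...
+ *	if (mb->mbxStatus == 0)
+ *		max_rpi = mb->un.varRdConfig.max_rpi;
+ */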
+
+/*
+ *    Begin Structure Definitions for IOCB Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t statAction;
+	uint8_t statRsn;
+	uint8_t statBaExp;
+	uint8_t statLocalError;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t statLocalError;
+	uint8_t statBaExp;
+	uint8_t statRsn;
+	uint8_t statAction;
+#endif
+	/* statRsn  P/F_RJT reason codes */
+#define RJT_BAD_D_ID       0x01	/* Invalid D_ID field */
+#define RJT_BAD_S_ID       0x02	/* Invalid S_ID field */
+#define RJT_UNAVAIL_TEMP   0x03	/* N_Port unavailable temp. */
+#define RJT_UNAVAIL_PERM   0x04	/* N_Port unavailable perm. */
+#define RJT_UNSUP_CLASS    0x05	/* Class not supported */
+#define RJT_DELIM_ERR      0x06	/* Delimiter usage error */
+#define RJT_UNSUP_TYPE     0x07	/* Type not supported */
+#define RJT_BAD_CONTROL    0x08	/* Invalid link control */
+#define RJT_BAD_RCTL       0x09	/* R_CTL invalid */
+#define RJT_BAD_FCTL       0x0A	/* F_CTL invalid */
+#define RJT_BAD_OXID       0x0B	/* OX_ID invalid */
+#define RJT_BAD_RXID       0x0C	/* RX_ID invalid */
+#define RJT_BAD_SEQID      0x0D	/* SEQ_ID invalid */
+#define RJT_BAD_DFCTL      0x0E	/* DF_CTL invalid */
+#define RJT_BAD_SEQCNT     0x0F	/* SEQ_CNT invalid */
+#define RJT_BAD_PARM       0x10	/* Param. field invalid */
+#define RJT_XCHG_ERR       0x11	/* Exchange error */
+#define RJT_PROT_ERR       0x12	/* Protocol error */
+#define RJT_BAD_LENGTH     0x13	/* Invalid Length */
+#define RJT_UNEXPECTED_ACK 0x14	/* Unexpected ACK */
+#define RJT_LOGIN_REQUIRED 0x16	/* Login required */
+#define RJT_TOO_MANY_SEQ   0x17	/* Excessive sequences */
+#define RJT_XCHG_NOT_STRT  0x18	/* Exchange not started */
+#define RJT_UNSUP_SEC_HDR  0x19	/* Security hdr not supported */
+#define RJT_UNAVAIL_PATH   0x1A	/* Fabric Path not available */
+#define RJT_VENDOR_UNIQUE  0xFF	/* Vendor unique error */
+
+#define IOERR_SUCCESS                 0x00	/* statLocalError */
+#define IOERR_MISSING_CONTINUE        0x01
+#define IOERR_SEQUENCE_TIMEOUT        0x02
+#define IOERR_INTERNAL_ERROR          0x03
+#define IOERR_INVALID_RPI             0x04
+#define IOERR_NO_XRI                  0x05
+#define IOERR_ILLEGAL_COMMAND         0x06
+#define IOERR_XCHG_DROPPED            0x07
+#define IOERR_ILLEGAL_FIELD           0x08
+#define IOERR_BAD_CONTINUE            0x09
+#define IOERR_TOO_MANY_BUFFERS        0x0A
+#define IOERR_RCV_BUFFER_WAITING      0x0B
+#define IOERR_NO_CONNECTION           0x0C
+#define IOERR_TX_DMA_FAILED           0x0D
+#define IOERR_RX_DMA_FAILED           0x0E
+#define IOERR_ILLEGAL_FRAME           0x0F
+#define IOERR_EXTRA_DATA              0x10
+#define IOERR_NO_RESOURCES            0x11
+#define IOERR_RESERVED                0x12
+#define IOERR_ILLEGAL_LENGTH          0x13
+#define IOERR_UNSUPPORTED_FEATURE     0x14
+#define IOERR_ABORT_IN_PROGRESS       0x15
+#define IOERR_ABORT_REQUESTED         0x16
+#define IOERR_RECEIVE_BUFFER_TIMEOUT  0x17
+#define IOERR_LOOP_OPEN_FAILURE       0x18
+#define IOERR_RING_RESET              0x19
+#define IOERR_LINK_DOWN               0x1A
+#define IOERR_CORRUPTED_DATA          0x1B
+#define IOERR_CORRUPTED_RPI           0x1C
+#define IOERR_OUT_OF_ORDER_DATA       0x1D
+#define IOERR_OUT_OF_ORDER_ACK        0x1E
+#define IOERR_DUP_FRAME               0x1F
+#define IOERR_LINK_CONTROL_FRAME      0x20	/* ACK_N received */
+#define IOERR_BAD_HOST_ADDRESS        0x21
+#define IOERR_RCV_HDRBUF_WAITING      0x22
+#define IOERR_MISSING_HDR_BUFFER      0x23
+#define IOERR_MSEQ_CHAIN_CORRUPTED    0x24
+#define IOERR_ABORTMULT_REQUESTED     0x25
+#define IOERR_BUFFER_SHORTAGE         0x28
+#define IOERR_DEFAULT                 0x29
+#define IOERR_CNT                     0x2A
+#define IOERR_SLER_FAILURE            0x46
+#define IOERR_SLER_CMD_RCV_FAILURE    0x47
+#define IOERR_SLER_REC_RJT_ERR        0x48
+#define IOERR_SLER_REC_SRR_RETRY_ERR  0x49
+#define IOERR_SLER_SRR_RJT_ERR        0x4A
+#define IOERR_SLER_RRQ_RJT_ERR        0x4C
+#define IOERR_SLER_RRQ_RETRY_ERR      0x4D
+#define IOERR_SLER_ABTS_ERR           0x4E
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR		0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR	0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR		0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR	0xF3
+#define IOERR_DRVR_MASK               0x100
+#define IOERR_SLI_DOWN                0x101  /* ulpStatus  - Driver defined */
+#define IOERR_SLI_BRESET              0x102
+#define IOERR_SLI_ABORTED             0x103
+#define IOERR_PARAM_MASK              0x1ff
+} PARM_ERR;
+
+typedef union {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint8_t Rctl;	/* R_CTL field */
+		uint8_t Type;	/* TYPE field */
+		uint8_t Dfctl;	/* DF_CTL field */
+		uint8_t Fctl;	/* Bits 0-7 of IOCB word 5 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+		uint8_t Fctl;	/* Bits 0-7 of IOCB word 5 */
+		uint8_t Dfctl;	/* DF_CTL field */
+		uint8_t Type;	/* TYPE field */
+		uint8_t Rctl;	/* R_CTL field */
+#endif
+
+#define BC      0x02		/* Broadcast Received  - Fctl */
+#define SI      0x04		/* Sequence Initiative */
+#define LA      0x08		/* Ignore Link Attention state */
+#define LS      0x80		/* Last Sequence */
+	} hcsw;
+	uint32_t reserved;
+} WORD5;
+
+/* IOCB Command template for a generic response */
+typedef struct {
+	uint32_t reserved[4];
+	PARM_ERR perr;
+} GENERIC_RSP;
+
+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
+typedef struct {
+	struct ulp_bde xrsqbde[2];
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} XR_SEQ_FIELDS;
+
+/* IOCB Command template for ELS_REQUEST */
+typedef struct {
+	struct ulp_bde elsReq;
+	struct ulp_bde elsRsp;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word4Rsvd:7;
+	uint32_t fl:1;
+	uint32_t myID:24;
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t myID:24;
+	uint32_t fl:1;
+	uint32_t word4Rsvd:7;
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST;
+
+/* IOCB Command template for RCV_ELS_REQ */
+typedef struct {
+	struct ulp_bde elsReq[2];
+	uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ;
+
+/* IOCB Command template for ABORT / CLOSE_XRI */
+typedef struct {
+	uint32_t rsvd[3];
+	uint32_t abortType;
+#define ABORT_TYPE_ABTX  0x00000000
+#define ABORT_TYPE_ABTS  0x00000001
+	uint32_t parm;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t abortContextTag; /* ulpContext from command to abort/close */
+	uint16_t abortIoTag;	/* ulpIoTag from command to abort/close */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t abortIoTag;	/* ulpIoTag from command to abort/close */
+	uint16_t abortContextTag; /* ulpContext from command to abort/close */
+#endif
+} AC_XRI;
+
+/* IOCB Command template for ABORT_MXRI64 */
+typedef struct {
+	uint32_t rsvd[3];
+	uint32_t abortType;
+	uint32_t parm;
+	uint32_t iotag32;
+} A_MXRI64;
+
+/* IOCB Command template for GET_RPI */
+typedef struct {
+	uint32_t rsvd[4];
+	uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} GET_RPI;
+
+/* IOCB Command template for all FCP Initiator commands */
+typedef struct {
+	struct ulp_bde fcpi_cmnd;	/* FCP_CMND payload descriptor */
+	struct ulp_bde fcpi_rsp;	/* Rcv buffer */
+	uint32_t fcpi_parm;
+	uint32_t fcpi_XRdy;	/* transfer ready for IWRITE */
+} FCPI_FIELDS;
+
+/* IOCB Command template for all FCP Target commands */
+typedef struct {
+	struct ulp_bde fcpt_Buffer[2];	/* FCP_CMND payload descriptor */
+	uint32_t fcpt_Offset;
+	uint32_t fcpt_Length;	/* transfer ready for IWRITE */
+} FCPT_FIELDS;
+
+/* SLI-2 IOCB structure definitions */
+
+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} XMT_SEQ_FIELDS64;
+
+/* This word is remote ports D_ID for XMIT_ELS_RSP64 */
+#define xmit_els_remoteID xrsqRo
+
+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
+typedef struct {
+	struct ulp_bde64 rcvBde;
+	uint32_t rsvd1;
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} RCV_SEQ_FIELDS64;
+
+/* IOCB Command template for ELS_REQUEST64 */
+typedef struct {
+	ULP_BDL bdl;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word4Rsvd:7;
+	uint32_t fl:1;
+	uint32_t myID:24;
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t myID:24;
+	uint32_t fl:1;
+	uint32_t word4Rsvd:7;
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST64;
+
+/* IOCB Command template for GEN_REQUEST64 */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} GEN_REQUEST64;
+
+/* IOCB Command template for RCV_ELS_REQ64 */
+typedef struct {
+	struct ulp_bde64 elsReq;
+	uint32_t rcvd1;
+	uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ64;
+
+/* IOCB Command template for RCV_SEQ64 */
+struct rcv_seq64 {
+	struct ulp_bde64 elsReq;
+	uint32_t hbq_1;
+	uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rctl:8;
+	uint32_t type:8;
+	uint32_t dfctl:8;
+	uint32_t ls:1;
+	uint32_t fs:1;
+	uint32_t rsvd2:3;
+	uint32_t si:1;
+	uint32_t bc:1;
+	uint32_t rsvd3:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rsvd3:1;
+	uint32_t bc:1;
+	uint32_t si:1;
+	uint32_t rsvd2:3;
+	uint32_t fs:1;
+	uint32_t ls:1;
+	uint32_t dfctl:8;
+	uint32_t type:8;
+	uint32_t rctl:8;
+#endif
+};
+
+/* IOCB Command template for all 64 bit FCP Initiator commands */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t fcpi_parm;
+	uint32_t fcpi_XRdy;	/* transfer ready for IWRITE */
+} FCPI_FIELDS64;
+
+/* IOCB Command template for all 64 bit FCP Target commands */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t fcpt_Offset;
+	uint32_t fcpt_Length;	/* transfer ready for IWRITE */
+} FCPT_FIELDS64;
+
+/* IOCB Command template for Async Status iocb commands */
+typedef struct {
+	uint32_t rsvd[4];
+	uint32_t param;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t evt_code;		/* High order bits word 5 */
+	uint16_t sub_ctxt_tag;		/* Low  order bits word 5 */
+#else   /*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t sub_ctxt_tag;		/* Low  order bits word 5 */
+	uint16_t evt_code;		/* High order bits word 5 */
+#endif
+} ASYNCSTAT_FIELDS;
+#define ASYNC_TEMP_WARN		0x100
+#define ASYNC_TEMP_SAFE		0x101
+#define ASYNC_STATUS_CN		0x102
+
+/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
+   or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
+
+struct rcv_sli3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t ox_id;
+	uint16_t seq_cnt;
+
+	uint16_t vpi;
+	uint16_t word9Rsvd;
+#else  /*  __LITTLE_ENDIAN */
+	uint16_t seq_cnt;
+	uint16_t ox_id;
+
+	uint16_t word9Rsvd;
+	uint16_t vpi;
+#endif
+	uint32_t word10Rsvd;
+	uint32_t acc_len;      /* accumulated length */
+	struct ulp_bde64 bde2;
+};
+
+/* Structure used for a single HBQ entry */
+struct lpfc_hbq_entry {
+	struct ulp_bde64 bde;
+	uint32_t buffer_tag;
+};
+
+/* IOCB Command template for QUE_XRI64_CX (0xB3) command */
+typedef struct {
+	struct lpfc_hbq_entry   buff;
+	uint32_t                rsvd;
+	uint32_t		rsvd1;
+} QUE_XRI64_CX_FIELDS;
+
+struct que_xri64cx_ext_fields {
+	uint32_t	iotag64_low;
+	uint32_t	iotag64_high;
+	uint32_t	ebde_count;
+	uint32_t	rsvd;
+	struct lpfc_hbq_entry	buff[5];
+};
+
+struct sli3_bg_fields {
+	uint32_t filler[6];	/* word 8-13 in IOCB */
+	uint32_t bghm;		/* word 14 - BlockGuard High Water Mark */
+/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
+#define BGS_BIDIR_BG_PROF_MASK		0xff000000
+#define BGS_BIDIR_BG_PROF_SHIFT		24
+#define BGS_BIDIR_ERR_COND_FLAGS_MASK	0x003f0000
+#define BGS_BIDIR_ERR_COND_SHIFT	16
+#define BGS_BG_PROFILE_MASK		0x0000ff00
+#define BGS_BG_PROFILE_SHIFT		8
+#define BGS_INVALID_PROF_MASK		0x00000020
+#define BGS_INVALID_PROF_SHIFT		5
+#define BGS_UNINIT_DIF_BLOCK_MASK	0x00000010
+#define BGS_UNINIT_DIF_BLOCK_SHIFT	4
+#define BGS_HI_WATER_MARK_PRESENT_MASK	0x00000008
+#define BGS_HI_WATER_MARK_PRESENT_SHIFT	3
+#define BGS_REFTAG_ERR_MASK		0x00000004
+#define BGS_REFTAG_ERR_SHIFT		2
+#define BGS_APPTAG_ERR_MASK		0x00000002
+#define BGS_APPTAG_ERR_SHIFT		1
+#define BGS_GUARD_ERR_MASK		0x00000001
+#define BGS_GUARD_ERR_SHIFT		0
+	uint32_t bgstat;	/* word 15 - BlockGuard Status */
+};
+
+static inline uint32_t
+lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
+{
+	return (bgstat & BGS_BIDIR_BG_PROF_MASK) >>
+				BGS_BIDIR_BG_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
+{
+	return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
+				BGS_BIDIR_ERR_COND_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bg_prof(uint32_t bgstat)
+{
+	return (bgstat & BGS_BG_PROFILE_MASK) >>
+				BGS_BG_PROFILE_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_invalid_prof(uint32_t bgstat)
+{
+	return (bgstat & BGS_INVALID_PROF_MASK) >>
+				BGS_INVALID_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
+{
+	return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >>
+				BGS_UNINIT_DIF_BLOCK_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
+{
+	return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >>
+				BGS_HI_WATER_MARK_PRESENT_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_reftag_err(uint32_t bgstat)
+{
+	return (bgstat & BGS_REFTAG_ERR_MASK) >>
+				BGS_REFTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_apptag_err(uint32_t bgstat)
+{
+	return (bgstat & BGS_APPTAG_ERR_MASK) >>
+				BGS_APPTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_guard_err(uint32_t bgstat)
+{
+	return (bgstat & BGS_GUARD_ERR_MASK) >>
+				BGS_GUARD_ERR_SHIFT;
+}
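+
+/* Illustrative sketch: on a BlockGuard completion the helpers above are
+ * applied to word 15 of the IOCB, e.g.:
+ *
+ *	uint32_t bgstat = iocb->unsli3.sli3_bg.bgstat;
+ *	if (lpfc_bgs_get_guard_err(bgstat))
+ *		... guard (CRC) tag mismatch on a DIF block ...
+ *	if (lpfc_bgs_get_reftag_err(bgstat))
+ *		... reference tag mismatch ...
+ *
+ * (iocb is assumed to be an IOCB_t, defined later in this header.)
+ */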
+
+#define LPFC_EXT_DATA_BDE_COUNT 3
+struct fcp_irw_ext {
+	uint32_t	io_tag64_low;
+	uint32_t	io_tag64_high;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t		reserved1;
+	uint8_t		reserved2;
+	uint8_t		reserved3;
+	uint8_t		ebde_count;
+#else  /* __LITTLE_ENDIAN */
+	uint8_t		ebde_count;
+	uint8_t		reserved3;
+	uint8_t		reserved2;
+	uint8_t		reserved1;
+#endif
+	uint32_t	reserved4;
+	struct ulp_bde64 rbde;		/* response bde */
+	struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT];	/* data BDE or BPL */
+	uint8_t icd[32];		/* immediate command data (32 bytes) */
+};
+
+typedef struct _IOCB {	/* IOCB structure */
+	union {
+		GENERIC_RSP grsp;	/* Generic response */
+		XR_SEQ_FIELDS xrseq;	/* XMIT / BCAST / RCV_SEQUENCE cmd */
+		struct ulp_bde cont[3];	/* up to 3 continuation bdes */
+		RCV_ELS_REQ rcvels;	/* RCV_ELS_REQ template */
+		AC_XRI acxri;	/* ABORT / CLOSE_XRI template */
+		A_MXRI64 amxri;	/* abort multiple xri command overlay */
+		GET_RPI getrpi;	/* GET_RPI template */
+		FCPI_FIELDS fcpi;	/* FCP Initiator template */
+		FCPT_FIELDS fcpt;	/* FCP target template */
+
+		/* SLI-2 structures */
+
+		struct ulp_bde64 cont64[2];  /* up to 2 64 bit continuation
+					      * bde_64s */
+		ELS_REQUEST64 elsreq64;	/* ELS_REQUEST template */
+		GEN_REQUEST64 genreq64;	/* GEN_REQUEST template */
+		RCV_ELS_REQ64 rcvels64;	/* RCV_ELS_REQ template */
+		XMT_SEQ_FIELDS64 xseq64;	/* XMIT / BCAST cmd */
+		FCPI_FIELDS64 fcpi64;	/* FCP 64 bit Initiator template */
+		FCPT_FIELDS64 fcpt64;	/* FCP 64 bit target template */
+		ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
+		QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
+		struct rcv_seq64 rcvseq64;	/* RCV_SEQ64 and RCV_CONT64 */
+		struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
+		uint32_t ulpWord[IOCB_WORD_SZ - 2];	/* generic 6 'words' */
+	} un;
+	union {
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint16_t ulpContext;	/* High order bits word 6 */
+			uint16_t ulpIoTag;	/* Low  order bits word 6 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint16_t ulpIoTag;	/* Low  order bits word 6 */
+			uint16_t ulpContext;	/* High order bits word 6 */
+#endif
+		} t1;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint16_t ulpContext;	/* High order bits word 6 */
+			uint16_t ulpIoTag1:2;	/* Low  order bits word 6 */
+			uint16_t ulpIoTag0:14;	/* Low  order bits word 6 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint16_t ulpIoTag0:14;	/* Low  order bits word 6 */
+			uint16_t ulpIoTag1:2;	/* Low  order bits word 6 */
+			uint16_t ulpContext;	/* High order bits word 6 */
+#endif
+		} t2;
+	} un1;
+#define ulpContext un1.t1.ulpContext
+#define ulpIoTag   un1.t1.ulpIoTag
+#define ulpIoTag0  un1.t2.ulpIoTag0
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t ulpTimeout:8;
+	uint32_t ulpXS:1;
+	uint32_t ulpFCP2Rcvy:1;
+	uint32_t ulpPU:2;
+	uint32_t ulpIr:1;
+	uint32_t ulpClass:3;
+	uint32_t ulpCommand:8;
+	uint32_t ulpStatus:4;
+	uint32_t ulpBdeCount:2;
+	uint32_t ulpLe:1;
+	uint32_t ulpOwner:1;	/* Low order bit word 7 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ulpOwner:1;	/* Low order bit word 7 */
+	uint32_t ulpLe:1;
+	uint32_t ulpBdeCount:2;
+	uint32_t ulpStatus:4;
+	uint32_t ulpCommand:8;
+	uint32_t ulpClass:3;
+	uint32_t ulpIr:1;
+	uint32_t ulpPU:2;
+	uint32_t ulpFCP2Rcvy:1;
+	uint32_t ulpXS:1;
+	uint32_t ulpTimeout:8;
+#endif
+
+	union {
+		struct rcv_sli3 rcvsli3; /* words 8 - 15 */
+
+		/* words 8-31 used for que_xri_cx iocb */
+		struct que_xri64cx_ext_fields que_xri64cx_ext_words;
+		struct fcp_irw_ext fcp_ext;
+		uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+
+		/* words 8-15 for BlockGuard */
+		struct sli3_bg_fields sli3_bg;
+	} unsli3;
+
+#define ulpCt_h ulpXS
+#define ulpCt_l ulpFCP2Rcvy
+
+#define IOCB_FCP	   1	/* IOCB is used for FCP ELS cmds-ulpRsvByte */
+#define IOCB_IP		   2	/* IOCB is used for IP ELS cmds */
+#define PARM_UNUSED        0	/* PU field (Word 4) not used */
+#define PARM_REL_OFF       1	/* PU field (Word 4) = R. O. */
+#define PARM_READ_CHECK    2	/* PU field (Word 4) = Data Transfer Length */
+#define PARM_NPIV_DID	   3
+#define CLASS1             0	/* Class 1 */
+#define CLASS2             1	/* Class 2 */
+#define CLASS3             2	/* Class 3 */
+#define CLASS_FCP_INTERMIX 7	/* FCP Data->Cls 1, all else->Cls 2 */
+
+#define IOSTAT_SUCCESS         0x0	/* ulpStatus  - HBA defined */
+#define IOSTAT_FCP_RSP_ERROR   0x1
+#define IOSTAT_REMOTE_STOP     0x2
+#define IOSTAT_LOCAL_REJECT    0x3
+#define IOSTAT_NPORT_RJT       0x4
+#define IOSTAT_FABRIC_RJT      0x5
+#define IOSTAT_NPORT_BSY       0x6
+#define IOSTAT_FABRIC_BSY      0x7
+#define IOSTAT_INTERMED_RSP    0x8
+#define IOSTAT_LS_RJT          0x9
+#define IOSTAT_BA_RJT          0xA
+#define IOSTAT_RSVD1           0xB
+#define IOSTAT_RSVD2           0xC
+#define IOSTAT_RSVD3           0xD
+#define IOSTAT_RSVD4           0xE
+#define IOSTAT_NEED_BUFFER     0xF
+#define IOSTAT_DRIVER_REJECT   0x10   /* ulpStatus  - Driver defined */
+#define IOSTAT_DEFAULT         0xF    /* Same as rsvd5 for now */
+#define IOSTAT_CNT             0x11
+
+} IOCB_t;
+
+
+#define SLI1_SLIM_SIZE   (4 * 1024)
+
+/* Historical sizing note: up to 498 IOCBs fit in the original 16K SLIM,
+ * since 256 (MAILBOX_t) + 140 (PCB_t) + (32 (IOCB_t) * 498) = 16332 < 16384.
+ * The SLI-2 SLIM region itself is now allocated as 64K.
+ */
+#define SLI2_SLIM_SIZE   (64 * 1024)
+
+/* Maximum IOCBs that will fit in SLI2 slim */
+#define MAX_SLI2_IOCB    498
+#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
+			    (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
+			    sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
+
+/* HBQ entries are 4 words each = 4k */
+#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) *  \
+			     lpfc_sli_hbq_count())
+
+struct lpfc_sli2_slim {
+	MAILBOX_t mbx;
+	uint32_t  mbx_ext_words[MAILBOX_EXT_WSIZE];
+	PCB_t pcb;
+	IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
+};
+
+/*
+ * This function checks the PCI device ID to allow special handling for
+ * LC HBAs.
+ *
+ * Parameters:
+ * device : the device field of struct pci_dev
+ *
+ * return 1 => TRUE
+ *        0 => FALSE
+ */
+static inline int
+lpfc_is_LC_HBA(unsigned short device)
+{
+	if ((device == PCI_DEVICE_ID_TFLY) ||
+	    (device == PCI_DEVICE_ID_PFLY) ||
+	    (device == PCI_DEVICE_ID_LP101) ||
+	    (device == PCI_DEVICE_ID_BMID) ||
+	    (device == PCI_DEVICE_ID_BSMB) ||
+	    (device == PCI_DEVICE_ID_ZMID) ||
+	    (device == PCI_DEVICE_ID_ZSMB) ||
+	    (device == PCI_DEVICE_ID_SAT_MID) ||
+	    (device == PCI_DEVICE_ID_SAT_SMB) ||
+	    (device == PCI_DEVICE_ID_RFLY))
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * Determine if an IOCB failed because of a link event or firmware reset.
+ */
+
+static inline int
+lpfc_error_lost_link(IOCB_t *iocbp)
+{
+	return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+		(iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
+		 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
+		 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+}
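+
+/* Illustrative sketch: completion handlers can use this to decide that a
+ * retry is pointless because the link itself went away, e.g.:
+ *
+ *	if (lpfc_error_lost_link(&iocbq->iocb))
+ *		... drop the command without retrying ...
+ *	else
+ *		... normal error recovery path ...
+ *
+ * (iocbq->iocb is assumed to be the embedded IOCB_t of a driver iocb
+ * wrapper; the wrapper type itself is defined elsewhere.)
+ */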
+
+#define MENLO_TRANSPORT_TYPE 0xfe
+#define MENLO_CONTEXT 0
+#define MENLO_PU 3
+#define MENLO_TIMEOUT 30
+#define SETVAR_MLOMNT 0x103107
+#define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hw4.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 0000000..2b14596
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,4555 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * E.g., for a bit field that starts at bit 7 of the "field4" member of a
+ * structure and is 2 bits in size, the following #defines must exist:
+ *	struct temp {
+ *		uint32_t	field1;
+ *		uint32_t	field2;
+ *		uint32_t	field3;
+ *		uint32_t	field4;
+ *	#define example_bit_field_SHIFT		7
+ *	#define example_bit_field_MASK		0x03
+ *	#define example_bit_field_WORD		field4
+ *		uint32_t	field5;
+ *	};
+ * Then the macros below may be used to get or set the value of that field.
+ * E.g., to get the value of the bit field from the above example:
+ *	struct temp t1;
+ *	value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *	bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *	bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get_be32(name, ptr) \
+	((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bf_get_le32(name, ptr) \
+	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+	((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+	name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+	~(name##_MASK << name##_SHIFT)))))
+#define bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
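+/* Illustrative sketch (example only, not part of the driver): the
+ * hypothetical struct temp from the comment above, exercised with the
+ * bf_set()/bf_get() accessors.  Kept under #if 0 so it is never compiled.
+ */
+#if 0
+struct temp {
+	uint32_t	field1;
+	uint32_t	field2;
+	uint32_t	field3;
+	uint32_t	field4;
+#define example_bit_field_SHIFT		7
+#define example_bit_field_MASK		0x03
+#define example_bit_field_WORD		field4
+	uint32_t	field5;
+};
+
+static void bf_example(void)
+{
+	struct temp t1 = { 0 };
+
+	bf_set(example_bit_field, &t1, 2);	/* t1.field4 == 2 << 7 == 0x100 */
+	if (bf_get(example_bit_field, &t1) != 2)
+		return;				/* never reached */
+	bf_set(example_bit_field, &t1, 0);	/* clears bits 8:7 again */
+}
+#endif
+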
+struct dma_address {
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+};
+
+struct lpfc_sli_intf {
+	uint32_t word0;
+#define lpfc_sli_intf_valid_SHIFT		29
+#define lpfc_sli_intf_valid_MASK		0x00000007
+#define lpfc_sli_intf_valid_WORD		word0
+#define LPFC_SLI_INTF_VALID		6
+#define lpfc_sli_intf_sli_hint2_SHIFT		24
+#define lpfc_sli_intf_sli_hint2_MASK		0x0000001F
+#define lpfc_sli_intf_sli_hint2_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT2_NONE	0
+#define lpfc_sli_intf_sli_hint1_SHIFT		16
+#define lpfc_sli_intf_sli_hint1_MASK		0x000000FF
+#define lpfc_sli_intf_sli_hint1_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT1_NONE	0
+#define LPFC_SLI_INTF_SLI_HINT1_1	1
+#define LPFC_SLI_INTF_SLI_HINT1_2	2
+#define lpfc_sli_intf_if_type_SHIFT		12
+#define lpfc_sli_intf_if_type_MASK		0x0000000F
+#define lpfc_sli_intf_if_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_0		0
+#define LPFC_SLI_INTF_IF_TYPE_1		1
+#define LPFC_SLI_INTF_IF_TYPE_2		2
+#define lpfc_sli_intf_sli_family_SHIFT		8
+#define lpfc_sli_intf_sli_family_MASK		0x0000000F
+#define lpfc_sli_intf_sli_family_WORD		word0
+#define LPFC_SLI_INTF_FAMILY_BE2	0x0
+#define LPFC_SLI_INTF_FAMILY_BE3	0x1
+#define LPFC_SLI_INTF_FAMILY_LNCR_A0	0xa
+#define LPFC_SLI_INTF_FAMILY_LNCR_B0	0xb
+#define lpfc_sli_intf_slirev_SHIFT		4
+#define lpfc_sli_intf_slirev_MASK		0x0000000F
+#define lpfc_sli_intf_slirev_WORD		word0
+#define LPFC_SLI_INTF_REV_SLI3		3
+#define LPFC_SLI_INTF_REV_SLI4		4
+#define lpfc_sli_intf_func_type_SHIFT		0
+#define lpfc_sli_intf_func_type_MASK		0x00000001
+#define lpfc_sli_intf_func_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_PHYS	0
+#define LPFC_SLI_INTF_IF_TYPE_VIRT	1
+};
+
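+/* Illustrative sketch (example only, not part of the driver): decoding a raw
+ * SLI_INTF register value with the accessors above.  A driver would first
+ * check the valid pattern before trusting any other field.
+ */
+#if 0
+static int sli_intf_is_sli4(uint32_t raw)
+{
+	struct lpfc_sli_intf intf;
+
+	intf.word0 = raw;
+	if (bf_get(lpfc_sli_intf_valid, &intf) != LPFC_SLI_INTF_VALID)
+		return 0;	/* register contents are not valid */
+	return bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4;
+}
+#endif
+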
+#define LPFC_SLI4_MBX_EMBED	true
+#define LPFC_SLI4_MBX_NEMBED	false
+
+#define LPFC_SLI4_MB_WORD_COUNT		64
+#define LPFC_MAX_MQ_PAGE		8
+#define LPFC_MAX_WQ_PAGE_V0		4
+#define LPFC_MAX_WQ_PAGE		8
+#define LPFC_MAX_RQ_PAGE		8
+#define LPFC_MAX_CQ_PAGE		4
+#define LPFC_MAX_EQ_PAGE		8
+
+#define LPFC_VIR_FUNC_MAX       32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX        5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE	0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE	16
+#define LPFC_ALIGN_64_BYTE	64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET	256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE	0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI	0x2
+#define LPFC_BMBX_BIT1_ADDR_LO	0
+#define LPFC_RPI_HDR_COUNT	64
+#define LPFC_HDR_TEMPLATE_SIZE	4096
+#define LPFC_RPI_ALLOC_ERROR 	0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT	132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX	 0
+
+/* Virtual function numbers */
+#define LPFC_VF0		0
+#define LPFC_VF1		1
+#define LPFC_VF2		2
+#define LPFC_VF3		3
+#define LPFC_VF4		4
+#define LPFC_VF5		5
+#define LPFC_VF6		6
+#define LPFC_VF7		7
+#define LPFC_VF8		8
+#define LPFC_VF9		9
+#define LPFC_VF10		10
+#define LPFC_VF11		11
+#define LPFC_VF12		12
+#define LPFC_VF13		13
+#define LPFC_VF14		14
+#define LPFC_VF15		15
+#define LPFC_VF16		16
+#define LPFC_VF17		17
+#define LPFC_VF18		18
+#define LPFC_VF19		19
+#define LPFC_VF20		20
+#define LPFC_VF21		21
+#define LPFC_VF22		22
+#define LPFC_VF23		23
+#define LPFC_VF24		24
+#define LPFC_VF25		25
+#define LPFC_VF26		26
+#define LPFC_VF27		27
+#define LPFC_VF28		28
+#define LPFC_VF29		29
+#define LPFC_VF30		30
+#define LPFC_VF31		31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0		0
+#define LPFC_PCI_FUNC1		1
+#define LPFC_PCI_FUNC2		2
+#define LPFC_PCI_FUNC3		3
+#define LPFC_PCI_FUNC4		4
+
+/* SLI4 interface type-2 PDEV_CTL register */
+#define LPFC_CTL_PDEV_CTL_OFFSET	0x414
+#define LPFC_CTL_PDEV_CTL_DRST		0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST		0x00000002
+#define LPFC_CTL_PDEV_CTL_DD		0x00000004
+#define LPFC_CTL_PDEV_CTL_LC		0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL	0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE	0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC	0x20
+
+#define LPFC_FW_DUMP_REQUEST    (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT	4
+
+/* Algorithms for scheduling FCP commands to WQs */
+#define	LPFC_FCP_SCHED_ROUND_ROBIN	0
+#define	LPFC_FCP_SCHED_BY_CPU		1
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST       651042
+#define LPFC_DMULT_MAX         1023
+
+/* Configuration of interrupts/sec for the entire HBA port */
+#define LPFC_MIN_IMAX          5000
+#define LPFC_MAX_IMAX          5000000
+#define LPFC_DEF_IMAX          150000
+
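+/* Illustrative sketch (example only, not part of the driver): one plausible
+ * mapping from a desired interrupts/sec figure to the EQ delay multiplier,
+ * assuming the multiplier encodes the interrupt interval in fixed hardware
+ * ticks (LPFC_DMULT_CONST / rate), clamped to the 10-bit field maximum.
+ */
+#if 0
+static uint32_t imax_to_dmult(uint32_t intrs_per_sec)
+{
+	uint32_t dmult;
+
+	if (!intrs_per_sec)
+		return 0;			/* no interrupt moderation */
+	dmult = LPFC_DMULT_CONST / intrs_per_sec;
+	if (dmult > LPFC_DMULT_MAX)
+		dmult = LPFC_DMULT_MAX;
+	return dmult;
+}
+#endif
+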
+#define LPFC_MIN_CPU_MAP       0
+#define LPFC_MAX_CPU_MAP       2
+#define LPFC_HBA_CPU_MAP       1
+#define LPFC_DRIVER_CPU_MAP    2  /* Default */
+
+/* PORT_CAPABILITIES constants. */
+#define LPFC_MAX_SUPPORTED_PAGES	8
+
+struct ulp_bde64 {
+	union ULP_BDE_TUS {
+		uint32_t w;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64    0x00	/* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01	/* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P   0x02	/* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I   0x08	/* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP  0x0A	/* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64    0x40	/* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P   0x42	/* BLP (Port-resident) */
+		} f;
+	} tus;
+	uint32_t addrLow;
+	uint32_t addrHigh;
+};
+
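+/* Illustrative sketch (example only, not part of the driver): filling in a
+ * 64-bit BDE for a host-resident buffer.  The driver's putPaddrLow()/
+ * putPaddrHigh() helpers perform this address split; plain shifts are used
+ * here to keep the sketch self-contained.
+ */
+#if 0
+static void fill_bde64(struct ulp_bde64 *bde, uint64_t paddr, uint32_t len)
+{
+	bde->addrLow  = (uint32_t)(paddr & 0xFFFFFFFF);
+	bde->addrHigh = (uint32_t)(paddr >> 32);
+	bde->tus.f.bdeSize  = len;		/* buffer size in bytes */
+	bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;	/* host-resident BDE */
+}
+#endif
+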
+/* Maximum size of immediate data that can fit into a 128 byte WQE */
+#define LPFC_MAX_BDE_IMM_SIZE	64
+
+struct lpfc_sli4_flags {
+	uint32_t word0;
+#define lpfc_idx_rsrc_rdy_SHIFT		0
+#define lpfc_idx_rsrc_rdy_MASK		0x00000001
+#define lpfc_idx_rsrc_rdy_WORD		word0
+#define LPFC_IDX_RSRC_RDY		1
+#define lpfc_rpi_rsrc_rdy_SHIFT		1
+#define lpfc_rpi_rsrc_rdy_MASK		0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD		word0
+#define LPFC_RPI_RSRC_RDY		1
+#define lpfc_vpi_rsrc_rdy_SHIFT		2
+#define lpfc_vpi_rsrc_rdy_MASK		0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD		word0
+#define LPFC_VPI_RSRC_RDY		1
+#define lpfc_vfi_rsrc_rdy_SHIFT		3
+#define lpfc_vfi_rsrc_rdy_MASK		0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD		word0
+#define LPFC_VFI_RSRC_RDY		1
+};
+
+struct sli4_bls_rsp {
+	uint32_t word0_rsvd;      /* Word0 must be reserved */
+	uint32_t word1;
+#define lpfc_abts_orig_SHIFT      0
+#define lpfc_abts_orig_MASK       0x00000001
+#define lpfc_abts_orig_WORD       word1
+#define LPFC_ABTS_UNSOL_RSP       1
+#define LPFC_ABTS_UNSOL_INT       0
+	uint32_t word2;
+#define lpfc_abts_rxid_SHIFT      0
+#define lpfc_abts_rxid_MASK       0x0000FFFF
+#define lpfc_abts_rxid_WORD       word2
+#define lpfc_abts_oxid_SHIFT      16
+#define lpfc_abts_oxid_MASK       0x0000FFFF
+#define lpfc_abts_oxid_WORD       word2
+	uint32_t word3;
+#define lpfc_vndr_code_SHIFT	0
+#define lpfc_vndr_code_MASK	0x000000FF
+#define lpfc_vndr_code_WORD	word3
+#define lpfc_rsn_expln_SHIFT	8
+#define lpfc_rsn_expln_MASK	0x000000FF
+#define lpfc_rsn_expln_WORD	word3
+#define lpfc_rsn_code_SHIFT	16
+#define lpfc_rsn_code_MASK	0x000000FF
+#define lpfc_rsn_code_WORD	word3
+
+	uint32_t word4;
+	uint32_t word5_rsvd;	/* Word5 must be reserved */
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+	uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT	16
+#define lpfc_eqe_resource_id_MASK	0x0000FFFF
+#define lpfc_eqe_resource_id_WORD	word0
+#define lpfc_eqe_minor_code_SHIFT	4
+#define lpfc_eqe_minor_code_MASK	0x00000FFF
+#define lpfc_eqe_minor_code_WORD	word0
+#define lpfc_eqe_major_code_SHIFT	1
+#define lpfc_eqe_major_code_MASK	0x00000007
+#define lpfc_eqe_major_code_WORD	word0
+#define lpfc_eqe_valid_SHIFT		0
+#define lpfc_eqe_valid_MASK		0x00000001
+#define lpfc_eqe_valid_WORD		word0
+};
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t word3;
+#define lpfc_cqe_valid_SHIFT		31
+#define lpfc_cqe_valid_MASK		0x00000001
+#define lpfc_cqe_valid_WORD		word3
+#define lpfc_cqe_code_SHIFT		16
+#define lpfc_cqe_code_MASK		0x000000FF
+#define lpfc_cqe_code_WORD		word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS		0x0
+#define CQE_STATUS_FCP_RSP_FAILURE	0x1
+#define CQE_STATUS_REMOTE_STOP		0x2
+#define CQE_STATUS_LOCAL_REJECT		0x3
+#define CQE_STATUS_NPORT_RJT		0x4
+#define CQE_STATUS_FABRIC_RJT		0x5
+#define CQE_STATUS_NPORT_BSY		0x6
+#define CQE_STATUS_FABRIC_BSY		0x7
+#define CQE_STATUS_INTERMED_RSP		0x8
+#define CQE_STATUS_LS_RJT		0x9
+#define CQE_STATUS_CMD_REJECT		0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK	0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY	0xf
+#define CQE_STATUS_DI_ERROR		0x16
+
+/* Used when mapping CQE status to IOCB */
+#define LPFC_IOCB_STATUS_MASK		0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR		0x0
+#define CQE_HW_STATUS_UNDERRUN		0x1
+#define CQE_HW_STATUS_OVERRUN		0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE		0x1
+#define CQE_CODE_RELEASE_WQE		0x2
+#define CQE_CODE_RECEIVE		0x4
+#define CQE_CODE_XRI_ABORTED		0x5
+#define CQE_CODE_RECEIVE_V1		0x9
+#define CQE_CODE_NVME_ERSP		0xd
+
+/*
+ * Define the mask value for the extended status of xri_aborted and
+ * wcqe-completed CQEs. Currently, extended status is limited to 9 bits
+ * (0x0 -> 0x103).
+ */
+#define WCQE_PARAM_MASK		0x1FF
+
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+	uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT	16
+#define lpfc_wcqe_c_request_tag_MASK	0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD	word0
+#define lpfc_wcqe_c_status_SHIFT	8
+#define lpfc_wcqe_c_status_MASK		0x000000FF
+#define lpfc_wcqe_c_status_WORD		word0
+#define lpfc_wcqe_c_hw_status_SHIFT	0
+#define lpfc_wcqe_c_hw_status_MASK	0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD	word0
+#define lpfc_wcqe_c_ersp0_SHIFT		0
+#define lpfc_wcqe_c_ersp0_MASK		0x0000FFFF
+#define lpfc_wcqe_c_ersp0_WORD		word0
+	uint32_t total_data_placed;
+	uint32_t parameter;
+#define lpfc_wcqe_c_bg_edir_SHIFT	5
+#define lpfc_wcqe_c_bg_edir_MASK	0x00000001
+#define lpfc_wcqe_c_bg_edir_WORD	parameter
+#define lpfc_wcqe_c_bg_tdpv_SHIFT	3
+#define lpfc_wcqe_c_bg_tdpv_MASK	0x00000001
+#define lpfc_wcqe_c_bg_tdpv_WORD	parameter
+#define lpfc_wcqe_c_bg_re_SHIFT		2
+#define lpfc_wcqe_c_bg_re_MASK		0x00000001
+#define lpfc_wcqe_c_bg_re_WORD		parameter
+#define lpfc_wcqe_c_bg_ae_SHIFT		1
+#define lpfc_wcqe_c_bg_ae_MASK		0x00000001
+#define lpfc_wcqe_c_bg_ae_WORD		parameter
+#define lpfc_wcqe_c_bg_ge_SHIFT		0
+#define lpfc_wcqe_c_bg_ge_MASK		0x00000001
+#define lpfc_wcqe_c_bg_ge_WORD		parameter
+	uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT		28
+#define lpfc_wcqe_c_xb_MASK		0x00000001
+#define lpfc_wcqe_c_xb_WORD		word3
+#define lpfc_wcqe_c_pv_SHIFT		27
+#define lpfc_wcqe_c_pv_MASK		0x00000001
+#define lpfc_wcqe_c_pv_WORD		word3
+#define lpfc_wcqe_c_priority_SHIFT	24
+#define lpfc_wcqe_c_priority_MASK	0x00000007
+#define lpfc_wcqe_c_priority_WORD	word3
+#define lpfc_wcqe_c_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD		lpfc_cqe_code_WORD
+#define lpfc_wcqe_c_sqhead_SHIFT	0
+#define lpfc_wcqe_c_sqhead_MASK		0x0000FFFF
+#define lpfc_wcqe_c_sqhead_WORD		word3
+};
+
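+/* Illustrative sketch (example only, not part of the driver): pulling the
+ * commonly used fields out of a WQE-completion CQE.  WCQE_PARAM_MASK (above)
+ * trims the extended status carried in the parameter word.
+ */
+#if 0
+static void parse_wcqe(struct lpfc_wcqe_complete *wcqe)
+{
+	uint32_t tag    = bf_get(lpfc_wcqe_c_request_tag, wcqe);
+	uint32_t status = bf_get(lpfc_wcqe_c_status, wcqe);
+	uint32_t ext    = wcqe->parameter & WCQE_PARAM_MASK;
+	uint32_t bytes  = 0;
+
+	if (status == CQE_STATUS_SUCCESS)
+		bytes = wcqe->total_data_placed;  /* bytes transferred */
+	/* on CQE_STATUS_LOCAL_REJECT, 'ext' carries the reject reason */
+	(void)tag; (void)ext; (void)bytes;
+}
+#endif
+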
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT		16
+#define lpfc_wcqe_r_wq_id_MASK		0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD		word2
+#define lpfc_wcqe_r_wqe_index_SHIFT	0
+#define lpfc_wcqe_r_wqe_index_MASK	0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD	word2
+	uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD		lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+	uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT		8
+#define lpfc_wcqe_xa_status_MASK		0x000000FF
+#define lpfc_wcqe_xa_status_WORD		word0
+	uint32_t parameter;
+	uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT	16
+#define lpfc_wcqe_xa_remote_xid_MASK	0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD	word2
+#define lpfc_wcqe_xa_xri_SHIFT		0
+#define lpfc_wcqe_xa_xri_MASK		0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD		word2
+	uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT	lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT		30
+#define lpfc_wcqe_xa_ia_MASK		0x00000001
+#define lpfc_wcqe_xa_ia_WORD		word3
+#define CQE_XRI_ABORTED_IA_REMOTE	0
+#define CQE_XRI_ABORTED_IA_LOCAL	1
+#define lpfc_wcqe_xa_br_SHIFT		29
+#define lpfc_wcqe_xa_br_MASK		0x00000001
+#define lpfc_wcqe_xa_br_WORD		word3
+#define CQE_XRI_ABORTED_BR_BA_ACC	0
+#define CQE_XRI_ABORTED_BR_BA_RJT	1
+#define lpfc_wcqe_xa_eo_SHIFT		28
+#define lpfc_wcqe_xa_eo_MASK		0x00000001
+#define lpfc_wcqe_xa_eo_WORD		word3
+#define CQE_XRI_ABORTED_EO_REMOTE	0
+#define CQE_XRI_ABORTED_EO_LOCAL	1
+#define lpfc_wcqe_xa_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD		lpfc_cqe_code_WORD
+};
+
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+	uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT		16
+#define lpfc_rcqe_bindex_MASK		0x0000FFF
+#define lpfc_rcqe_bindex_WORD		word0
+#define lpfc_rcqe_status_SHIFT		8
+#define lpfc_rcqe_status_MASK		0x000000FF
+#define lpfc_rcqe_status_WORD		word0
+#define FC_STATUS_RQ_SUCCESS		0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 	0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF 	0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC 	0x13 /* Frame Discard */
+	uint32_t word1;
+#define lpfc_rcqe_fcf_id_v1_SHIFT	0
+#define lpfc_rcqe_fcf_id_v1_MASK	0x0000003F
+#define lpfc_rcqe_fcf_id_v1_WORD	word1
+	uint32_t word2;
+#define lpfc_rcqe_length_SHIFT		16
+#define lpfc_rcqe_length_MASK		0x0000FFFF
+#define lpfc_rcqe_length_WORD		word2
+#define lpfc_rcqe_rq_id_SHIFT		6
+#define lpfc_rcqe_rq_id_MASK		0x000003FF
+#define lpfc_rcqe_rq_id_WORD		word2
+#define lpfc_rcqe_fcf_id_SHIFT		0
+#define lpfc_rcqe_fcf_id_MASK		0x0000003F
+#define lpfc_rcqe_fcf_id_WORD		word2
+#define lpfc_rcqe_rq_id_v1_SHIFT	0
+#define lpfc_rcqe_rq_id_v1_MASK		0x0000FFFF
+#define lpfc_rcqe_rq_id_v1_WORD		word2
+	uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT		30
+#define lpfc_rcqe_port_MASK		0x00000001
+#define lpfc_rcqe_port_WORD		word3
+#define lpfc_rcqe_hdr_length_SHIFT	24
+#define lpfc_rcqe_hdr_length_MASK	0x0000001F
+#define lpfc_rcqe_hdr_length_WORD	word3
+#define lpfc_rcqe_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD		lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT		8
+#define lpfc_rcqe_eof_MASK		0x000000FF
+#define lpfc_rcqe_eof_WORD		word3
+#define FCOE_EOFn	0x41
+#define FCOE_EOFt	0x42
+#define FCOE_EOFni	0x49
+#define FCOE_EOFa	0x50
+#define lpfc_rcqe_sof_SHIFT		0
+#define lpfc_rcqe_sof_MASK		0x000000FF
+#define lpfc_rcqe_sof_WORD		word3
+#define FCOE_SOFi2	0x2d
+#define FCOE_SOFi3	0x2e
+#define FCOE_SOFn2	0x35
+#define FCOE_SOFn3	0x36
+};
+
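+/* Illustrative sketch (example only, not part of the driver): reading the
+ * status and frame length out of a receive-queue CQE.  Only
+ * FC_STATUS_RQ_SUCCESS indicates a fully usable payload.
+ */
+#if 0
+static int rcqe_frame_len(struct lpfc_rcqe *rcqe, uint32_t *len)
+{
+	if (bf_get(lpfc_rcqe_status, rcqe) != FC_STATUS_RQ_SUCCESS)
+		return -1;	/* truncated, discarded, or out of buffers */
+	*len = bf_get(lpfc_rcqe_length, rcqe);
+	return 0;
+}
+#endif
+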
+struct lpfc_rqe {
+	uint32_t address_hi;
+	uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+	uint32_t word2;
+#define lpfc_bde4_last_SHIFT		31
+#define lpfc_bde4_last_MASK		0x00000001
+#define lpfc_bde4_last_WORD		word2
+#define lpfc_bde4_sge_offset_SHIFT	0
+#define lpfc_bde4_sge_offset_MASK	0x000003FF
+#define lpfc_bde4_sge_offset_WORD	word2
+	uint32_t word3;
+#define lpfc_bde4_length_SHIFT		0
+#define lpfc_bde4_length_MASK		0x000000FF
+#define lpfc_bde4_length_WORD		word3
+};
+
+struct lpfc_register {
+	uint32_t word0;
+};
+
+#define LPFC_PORT_SEM_UE_RECOVERABLE    0xE000
+#define LPFC_PORT_SEM_MASK		0xF000
+/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
+#define LPFC_UERR_STATUS_HI		0x00A4
+#define LPFC_UERR_STATUS_LO		0x00A0
+#define LPFC_UE_MASK_HI			0x00AC
+#define LPFC_UE_MASK_LO			0x00A8
+
+/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
+#define LPFC_SLI_INTF			0x0058
+
+#define LPFC_CTL_PORT_SEM_OFFSET	0x400
+#define lpfc_port_smphr_perr_SHIFT	31
+#define lpfc_port_smphr_perr_MASK	0x1
+#define lpfc_port_smphr_perr_WORD	word0
+#define lpfc_port_smphr_sfi_SHIFT	30
+#define lpfc_port_smphr_sfi_MASK	0x1
+#define lpfc_port_smphr_sfi_WORD	word0
+#define lpfc_port_smphr_nip_SHIFT	29
+#define lpfc_port_smphr_nip_MASK	0x1
+#define lpfc_port_smphr_nip_WORD	word0
+#define lpfc_port_smphr_ipc_SHIFT	28
+#define lpfc_port_smphr_ipc_MASK	0x1
+#define lpfc_port_smphr_ipc_WORD	word0
+#define lpfc_port_smphr_scr1_SHIFT	27
+#define lpfc_port_smphr_scr1_MASK	0x1
+#define lpfc_port_smphr_scr1_WORD	word0
+#define lpfc_port_smphr_scr2_SHIFT	26
+#define lpfc_port_smphr_scr2_MASK	0x1
+#define lpfc_port_smphr_scr2_WORD	word0
+#define lpfc_port_smphr_host_scratch_SHIFT	16
+#define lpfc_port_smphr_host_scratch_MASK	0xFF
+#define lpfc_port_smphr_host_scratch_WORD	word0
+#define lpfc_port_smphr_port_status_SHIFT	0
+#define lpfc_port_smphr_port_status_MASK	0xFFFF
+#define lpfc_port_smphr_port_status_WORD	word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET			0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY		0x0001
+#define LPFC_POST_STAGE_HOST_RDY			0x0002
+#define LPFC_POST_STAGE_BE_RESET			0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START		0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE			0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START		0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE			0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START		0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE		0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START			0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE			0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START		0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE		0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START		0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE		0x0701
+#define LPFC_POST_STAGE_ARMFW_START			0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START		0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE			0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START	0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE	0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET			0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK			0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE		0x0B02
+#define LPFC_POST_STAGE_PERFROM_TFTP			0x0B03
+#define LPFC_POST_STAGE_PARSE_XML			0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE			0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE			0x0B06
+#define LPFC_POST_STAGE_RC_DONE				0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM			0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS			0x0C00
+#define LPFC_POST_STAGE_PORT_READY			0xC000
+#define LPFC_POST_STAGE_PORT_UE			0xF000
+
+#define LPFC_CTL_PORT_STA_OFFSET	0x404
+#define lpfc_sliport_status_err_SHIFT	31
+#define lpfc_sliport_status_err_MASK	0x1
+#define lpfc_sliport_status_err_WORD	word0
+#define lpfc_sliport_status_end_SHIFT	30
+#define lpfc_sliport_status_end_MASK	0x1
+#define lpfc_sliport_status_end_WORD	word0
+#define lpfc_sliport_status_oti_SHIFT	29
+#define lpfc_sliport_status_oti_MASK	0x1
+#define lpfc_sliport_status_oti_WORD	word0
+#define lpfc_sliport_status_rn_SHIFT	24
+#define lpfc_sliport_status_rn_MASK	0x1
+#define lpfc_sliport_status_rn_WORD	word0
+#define lpfc_sliport_status_rdy_SHIFT	23
+#define lpfc_sliport_status_rdy_MASK	0x1
+#define lpfc_sliport_status_rdy_WORD	word0
+#define MAX_IF_TYPE_2_RESETS		6
+
+#define LPFC_CTL_PORT_CTL_OFFSET	0x408
+#define lpfc_sliport_ctrl_end_SHIFT	30
+#define lpfc_sliport_ctrl_end_MASK	0x1
+#define lpfc_sliport_ctrl_end_WORD	word0
+#define LPFC_SLIPORT_LITTLE_ENDIAN 0
+#define LPFC_SLIPORT_BIG_ENDIAN	   1
+#define lpfc_sliport_ctrl_ip_SHIFT	27
+#define lpfc_sliport_ctrl_ip_MASK	0x1
+#define lpfc_sliport_ctrl_ip_WORD	word0
+#define LPFC_SLIPORT_INIT_PORT	1
+
+#define LPFC_CTL_PORT_ER1_OFFSET	0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET	0x410
+
+#define LPFC_CTL_PORT_EQ_DELAY_OFFSET	0x418
+#define lpfc_sliport_eqdelay_delay_SHIFT 16
+#define lpfc_sliport_eqdelay_delay_MASK	0xffff
+#define lpfc_sliport_eqdelay_delay_WORD	word0
+#define lpfc_sliport_eqdelay_id_SHIFT	0
+#define lpfc_sliport_eqdelay_id_MASK	0xfff
+#define lpfc_sliport_eqdelay_id_WORD	word0
+#define LPFC_SEC_TO_USEC		1000000
+
+/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
+ * reside in BAR 2.
+ */
+#define LPFC_SLIPORT_IF0_SMPHR	0x00AC
+
+#define LPFC_IMR_MASK_ALL	0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
+
+#define LPFC_HST_ISR0		0x0C18
+#define LPFC_HST_ISR1		0x0C1C
+#define LPFC_HST_ISR2		0x0C20
+#define LPFC_HST_ISR3		0x0C24
+#define LPFC_HST_ISR4		0x0C28
+
+#define LPFC_HST_IMR0		0x0C48
+#define LPFC_HST_IMR1		0x0C4C
+#define LPFC_HST_IMR2		0x0C50
+#define LPFC_HST_IMR3		0x0C54
+#define LPFC_HST_IMR4		0x0C58
+
+#define LPFC_HST_ISCR0		0x0C78
+#define LPFC_HST_ISCR1		0x0C7C
+#define LPFC_HST_ISCR2		0x0C80
+#define LPFC_HST_ISCR3		0x0C84
+#define LPFC_HST_ISCR4		0x0C88
+
+#define LPFC_SLI4_INTR0			BIT0
+#define LPFC_SLI4_INTR1			BIT1
+#define LPFC_SLI4_INTR2			BIT2
+#define LPFC_SLI4_INTR3			BIT3
+#define LPFC_SLI4_INTR4			BIT4
+#define LPFC_SLI4_INTR5			BIT5
+#define LPFC_SLI4_INTR6			BIT6
+#define LPFC_SLI4_INTR7			BIT7
+#define LPFC_SLI4_INTR8			BIT8
+#define LPFC_SLI4_INTR9			BIT9
+#define LPFC_SLI4_INTR10		BIT10
+#define LPFC_SLI4_INTR11		BIT11
+#define LPFC_SLI4_INTR12		BIT12
+#define LPFC_SLI4_INTR13		BIT13
+#define LPFC_SLI4_INTR14		BIT14
+#define LPFC_SLI4_INTR15		BIT15
+#define LPFC_SLI4_INTR16		BIT16
+#define LPFC_SLI4_INTR17		BIT17
+#define LPFC_SLI4_INTR18		BIT18
+#define LPFC_SLI4_INTR19		BIT19
+#define LPFC_SLI4_INTR20		BIT20
+#define LPFC_SLI4_INTR21		BIT21
+#define LPFC_SLI4_INTR22		BIT22
+#define LPFC_SLI4_INTR23		BIT23
+#define LPFC_SLI4_INTR24		BIT24
+#define LPFC_SLI4_INTR25		BIT25
+#define LPFC_SLI4_INTR26		BIT26
+#define LPFC_SLI4_INTR27		BIT27
+#define LPFC_SLI4_INTR28		BIT28
+#define LPFC_SLI4_INTR29		BIT29
+#define LPFC_SLI4_INTR30		BIT30
+#define LPFC_SLI4_INTR31		BIT31
+
+/*
+ * The Doorbell registers defined here exist in different BAR
+ * register sets depending on the UCNA Port's reported if_type
+ * value.  For UCNA ports running SLI4 and if_type 0, they reside in
+ * BAR4.  For UCNA ports running SLI4 and if_type 2, they reside in
+ * BAR0.  The offsets are the same so the driver must account for
+ * any base address difference.
+ */
+#define LPFC_ULP0_RQ_DOORBELL		0x00A0
+#define LPFC_ULP1_RQ_DOORBELL		0x00C0
+#define lpfc_rq_db_list_fm_num_posted_SHIFT	24
+#define lpfc_rq_db_list_fm_num_posted_MASK	0x00FF
+#define lpfc_rq_db_list_fm_num_posted_WORD	word0
+#define lpfc_rq_db_list_fm_index_SHIFT		16
+#define lpfc_rq_db_list_fm_index_MASK		0x00FF
+#define lpfc_rq_db_list_fm_index_WORD		word0
+#define lpfc_rq_db_list_fm_id_SHIFT		0
+#define lpfc_rq_db_list_fm_id_MASK		0xFFFF
+#define lpfc_rq_db_list_fm_id_WORD		word0
+#define lpfc_rq_db_ring_fm_num_posted_SHIFT	16
+#define lpfc_rq_db_ring_fm_num_posted_MASK	0x3FFF
+#define lpfc_rq_db_ring_fm_num_posted_WORD	word0
+#define lpfc_rq_db_ring_fm_id_SHIFT		0
+#define lpfc_rq_db_ring_fm_id_MASK		0xFFFF
+#define lpfc_rq_db_ring_fm_id_WORD		word0
+
+#define LPFC_ULP0_WQ_DOORBELL		0x0040
+#define LPFC_ULP1_WQ_DOORBELL		0x0060
+#define lpfc_wq_db_list_fm_num_posted_SHIFT	24
+#define lpfc_wq_db_list_fm_num_posted_MASK	0x00FF
+#define lpfc_wq_db_list_fm_num_posted_WORD	word0
+#define lpfc_wq_db_list_fm_index_SHIFT		16
+#define lpfc_wq_db_list_fm_index_MASK		0x00FF
+#define lpfc_wq_db_list_fm_index_WORD		word0
+#define lpfc_wq_db_list_fm_id_SHIFT		0
+#define lpfc_wq_db_list_fm_id_MASK		0xFFFF
+#define lpfc_wq_db_list_fm_id_WORD		word0
+#define lpfc_wq_db_ring_fm_num_posted_SHIFT     16
+#define lpfc_wq_db_ring_fm_num_posted_MASK      0x3FFF
+#define lpfc_wq_db_ring_fm_num_posted_WORD      word0
+#define lpfc_wq_db_ring_fm_id_SHIFT             0
+#define lpfc_wq_db_ring_fm_id_MASK              0xFFFF
+#define lpfc_wq_db_ring_fm_id_WORD              word0
+
+#define LPFC_EQCQ_DOORBELL		0x0120
+#define lpfc_eqcq_doorbell_se_SHIFT		31
+#define lpfc_eqcq_doorbell_se_MASK		0x0001
+#define lpfc_eqcq_doorbell_se_WORD		word0
+#define LPFC_EQCQ_SOLICIT_ENABLE_OFF	0
+#define LPFC_EQCQ_SOLICIT_ENABLE_ON	1
+#define lpfc_eqcq_doorbell_arm_SHIFT		29
+#define lpfc_eqcq_doorbell_arm_MASK		0x0001
+#define lpfc_eqcq_doorbell_arm_WORD		word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT	16
+#define lpfc_eqcq_doorbell_num_released_MASK	0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD	word0
+#define lpfc_eqcq_doorbell_qt_SHIFT		10
+#define lpfc_eqcq_doorbell_qt_MASK		0x0001
+#define lpfc_eqcq_doorbell_qt_WORD		word0
+#define LPFC_QUEUE_TYPE_COMPLETION	0
+#define LPFC_QUEUE_TYPE_EVENT		1
+#define lpfc_eqcq_doorbell_eqci_SHIFT		9
+#define lpfc_eqcq_doorbell_eqci_MASK		0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_lo_SHIFT	0
+#define lpfc_eqcq_doorbell_cqid_lo_MASK		0x03FF
+#define lpfc_eqcq_doorbell_cqid_lo_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_hi_SHIFT	11
+#define lpfc_eqcq_doorbell_cqid_hi_MASK		0x001F
+#define lpfc_eqcq_doorbell_cqid_hi_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_lo_SHIFT	0
+#define lpfc_eqcq_doorbell_eqid_lo_MASK		0x01FF
+#define lpfc_eqcq_doorbell_eqid_lo_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_hi_SHIFT	11
+#define lpfc_eqcq_doorbell_eqid_hi_MASK		0x001F
+#define lpfc_eqcq_doorbell_eqid_hi_WORD		word0
+#define LPFC_CQID_HI_FIELD_SHIFT		10
+#define LPFC_EQID_HI_FIELD_SHIFT		9
+
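+/* Illustrative sketch (example only, not part of the driver): composing an
+ * EQCQ doorbell word that arms a completion queue.  CQ IDs wider than 10
+ * bits are split across the lo/hi fields using LPFC_CQID_HI_FIELD_SHIFT, as
+ * the defines above describe.
+ */
+#if 0
+static uint32_t build_cq_arm_doorbell(uint16_t cqid, uint16_t released)
+{
+	struct lpfc_register db;
+
+	db.word0 = 0;
+	bf_set(lpfc_eqcq_doorbell_cqid_lo, &db, cqid);
+	bf_set(lpfc_eqcq_doorbell_cqid_hi, &db,
+	       cqid >> LPFC_CQID_HI_FIELD_SHIFT);
+	bf_set(lpfc_eqcq_doorbell_qt, &db, LPFC_QUEUE_TYPE_COMPLETION);
+	bf_set(lpfc_eqcq_doorbell_num_released, &db, released);
+	bf_set(lpfc_eqcq_doorbell_arm, &db, 1);
+	return db.word0;	/* written at offset LPFC_EQCQ_DOORBELL */
+}
+#endif
+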
+#define LPFC_BMBX			0x0160
+#define lpfc_bmbx_addr_SHIFT		2
+#define lpfc_bmbx_addr_MASK		0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD		word0
+#define lpfc_bmbx_hi_SHIFT		1
+#define lpfc_bmbx_hi_MASK		0x0001
+#define lpfc_bmbx_hi_WORD		word0
+#define lpfc_bmbx_rdy_SHIFT		0
+#define lpfc_bmbx_rdy_MASK		0x0001
+#define lpfc_bmbx_rdy_WORD		word0
+
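+/* Illustrative sketch (example only, not part of the driver): building a
+ * bootstrap-mailbox register word.  The register carries a 30-bit slice of
+ * the mailbox physical address plus a hi/lo selector; how the address slices
+ * are derived is simplified here and is only an assumption for illustration.
+ */
+#if 0
+static uint32_t bmbx_word(uint32_t addr_bits, int is_hi)
+{
+	struct lpfc_register reg;
+
+	reg.word0 = 0;
+	bf_set(lpfc_bmbx_addr, &reg, addr_bits);	/* address bits 31:2 */
+	bf_set(lpfc_bmbx_hi, &reg, is_hi ? 1 : 0);
+	return reg.word0;	/* written at offset LPFC_BMBX */
+}
+#endif
+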
+#define LPFC_MQ_DOORBELL			0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT	16
+#define lpfc_mq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD	word0
+#define lpfc_mq_doorbell_id_SHIFT		0
+#define lpfc_mq_doorbell_id_MASK		0xFFFF
+#define lpfc_mq_doorbell_id_WORD		word0
+
+struct lpfc_sli4_cfg_mhdr {
+	uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT		0
+#define lpfc_mbox_hdr_emb_MASK		0x00000001
+#define lpfc_mbox_hdr_emb_WORD		word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT	3
+#define lpfc_mbox_hdr_sge_cnt_MASK	0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD	word1
+	uint32_t payload_length;
+	uint32_t tag_lo;
+	uint32_t tag_hi;
+	uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT	0
+#define lpfc_mbox_hdr_opcode_MASK	0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD	word6
+#define lpfc_mbox_hdr_subsystem_SHIFT	8
+#define lpfc_mbox_hdr_subsystem_MASK	0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD	word6
+#define lpfc_mbox_hdr_port_number_SHIFT	16
+#define lpfc_mbox_hdr_port_number_MASK	0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD	word6
+#define lpfc_mbox_hdr_domain_SHIFT	24
+#define lpfc_mbox_hdr_domain_MASK	0x000000FF
+#define lpfc_mbox_hdr_domain_WORD	word6
+		uint32_t timeout;
+		uint32_t request_length;
+		uint32_t word9;
+#define lpfc_mbox_hdr_version_SHIFT	0
+#define lpfc_mbox_hdr_version_MASK	0x000000FF
+#define lpfc_mbox_hdr_version_WORD	word9
+#define lpfc_mbox_hdr_pf_num_SHIFT	16
+#define lpfc_mbox_hdr_pf_num_MASK	0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD	word9
+#define lpfc_mbox_hdr_vh_num_SHIFT	24
+#define lpfc_mbox_hdr_vh_num_MASK	0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD	word9
+#define LPFC_Q_CREATE_VERSION_2	2
+#define LPFC_Q_CREATE_VERSION_1	1
+#define LPFC_Q_CREATE_VERSION_0	0
+#define LPFC_OPCODE_VERSION_0	0
+#define LPFC_OPCODE_VERSION_1	1
+	} request;
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT		0
+#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD		word6
+#define lpfc_mbox_hdr_subsystem_SHIFT		8
+#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD		word6
+#define lpfc_mbox_hdr_domain_SHIFT		24
+#define lpfc_mbox_hdr_domain_MASK		0x000000FF
+#define lpfc_mbox_hdr_domain_WORD		word6
+		uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT		0
+#define lpfc_mbox_hdr_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_status_WORD		word7
+#define lpfc_mbox_hdr_add_status_SHIFT		8
+#define lpfc_mbox_hdr_add_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD		word7
+		uint32_t response_length;
+		uint32_t actual_response_length;
+	} response;
+};
+
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
+struct mbox_header {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+};
+
+#define LPFC_EXTENT_LOCAL		0
+#define LPFC_TIMEOUT_DEFAULT		0
+#define LPFC_EXTENT_VERSION_DEFAULT	0
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_NA		0x0
+#define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
+
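+/* Illustrative sketch (example only, not part of the driver): populating the
+ * request side of an SLI4_CFG header for an embedded mailbox command.  The
+ * subsystem comes from the defines above and the opcode from the tables
+ * below; the exact payload-length bookkeeping is an assumption here.
+ */
+#if 0
+static void init_cfg_header(struct mbox_header *hdr, uint32_t subsystem,
+			    uint32_t opcode, uint32_t req_len)
+{
+	bf_set(lpfc_mbox_hdr_emb, &hdr->cfg_mhdr, 1);	/* embedded payload */
+	hdr->cfg_mhdr.payload_length = req_len;
+	bf_set(lpfc_mbox_hdr_subsystem, &hdr->cfg_shdr.request, subsystem);
+	bf_set(lpfc_mbox_hdr_opcode, &hdr->cfg_shdr.request, opcode);
+	hdr->cfg_shdr.request.timeout = LPFC_TIMEOUT_DEFAULT;
+	hdr->cfg_shdr.request.request_length = req_len;
+}
+#endif
+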
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0   0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1	0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_NA				0x00
+#define LPFC_MBOX_OPCODE_CQ_CREATE			0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE			0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE			0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES		0x20
+#define LPFC_MBOX_OPCODE_NOP				0x21
+#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY		0x29
+#define LPFC_MBOX_OPCODE_MQ_DESTROY			0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY			0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY			0x37
+#define LPFC_MBOX_OPCODE_QUERY_FW_CFG			0x3A
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET			0x3D
+#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG	0x3E
+#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG		0x43
+#define LPFC_MBOX_OPCODE_SET_BEACON_CONFIG              0x45
+#define LPFC_MBOX_OPCODE_GET_BEACON_CONFIG              0x46
+#define LPFC_MBOX_OPCODE_GET_PORT_NAME			0x4D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT			0x5A
+#define LPFC_MBOX_OPCODE_GET_VPD_DATA			0x5B
+#define LPFC_MBOX_OPCODE_SET_HOST_DATA			0x5D
+#define LPFC_MBOX_OPCODE_SEND_ACTIVATION		0x73
+#define LPFC_MBOX_OPCODE_RESET_LICENSES			0x74
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO		0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT		0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT		0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT		0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG		0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES		0xA1
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG		0xA4
+#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG		0xA5
+#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST		0xA6
+#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE		0xA8
+#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG	0xA9
+#define LPFC_MBOX_OPCODE_READ_OBJECT			0xAB
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT			0xAC
+#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST		0xAD
+#define LPFC_MBOX_OPCODE_DELETE_OBJECT			0xAE
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS		0xB5
+#define LPFC_MBOX_OPCODE_SET_FEATURES                   0xBF
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE			0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY		0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES		0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES		0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE			0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY		0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE		0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF			0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF		0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE		0x0B
+#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF		0x10
+#define LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET		0x1D
+#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS	0x21
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE		0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK	0x23
+
+/* Mailbox command structures */
+struct eq_context {
+	uint32_t word0;
+#define lpfc_eq_context_size_SHIFT	31
+#define lpfc_eq_context_size_MASK	0x00000001
+#define lpfc_eq_context_size_WORD	word0
+#define LPFC_EQE_SIZE_4			0x0
+#define LPFC_EQE_SIZE_16		0x1
+#define lpfc_eq_context_valid_SHIFT	29
+#define lpfc_eq_context_valid_MASK	0x00000001
+#define lpfc_eq_context_valid_WORD	word0
+	uint32_t word1;
+#define lpfc_eq_context_count_SHIFT	26
+#define lpfc_eq_context_count_MASK	0x00000003
+#define lpfc_eq_context_count_WORD	word1
+#define LPFC_EQ_CNT_256		0x0
+#define LPFC_EQ_CNT_512		0x1
+#define LPFC_EQ_CNT_1024	0x2
+#define LPFC_EQ_CNT_2048	0x3
+#define LPFC_EQ_CNT_4096	0x4
+	uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT	13
+#define lpfc_eq_context_delay_multi_MASK	0x000003FF
+#define lpfc_eq_context_delay_multi_WORD	word2
+	uint32_t reserved3;
+};
+
+struct eq_delay_info {
+	uint32_t eq_id;
+	uint32_t phase;
+	uint32_t delay_multi;
+};
+#define	LPFC_MAX_EQ_DELAY_EQID_CNT	8
+
+struct sgl_page_pairs {
+	uint32_t sgl_pg0_addr_lo;
+	uint32_t sgl_pg0_addr_hi;
+	uint32_t sgl_pg1_addr_lo;
+	uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+	struct mbox_header header;
+	uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT	0
+#define lpfc_post_sgl_pages_xri_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD	word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT	16
+#define lpfc_post_sgl_pages_xricnt_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD	word0
+	struct sgl_page_pairs  sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word0;
+	struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+	uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES	19
+	struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+	void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT	0
+#define lpfc_mbx_eq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD	word0
+			struct eq_context context;
+			struct dma_address page[LPFC_MAX_EQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT	0
+#define lpfc_mbx_eq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_modify_eq_delay {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t num_eq;
+			struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
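+/* Illustrative sketch (example only, not part of the driver): filling a
+ * MODIFY_EQ_DELAY request for a batch of EQs.  Up to
+ * LPFC_MAX_EQ_DELAY_EQID_CNT queues can be updated per mailbox command.
+ */
+#if 0
+static void fill_eq_delay(struct lpfc_mbx_modify_eq_delay *cmd,
+			  const uint32_t *eq_ids, uint32_t cnt, uint32_t dmult)
+{
+	uint32_t i;
+
+	if (cnt > LPFC_MAX_EQ_DELAY_EQID_CNT)
+		cnt = LPFC_MAX_EQ_DELAY_EQID_CNT;
+	cmd->u.request.num_eq = cnt;
+	for (i = 0; i < cnt; i++) {
+		cmd->u.request.eq[i].eq_id = eq_ids[i];
+		cmd->u.request.eq[i].phase = 0;
+		cmd->u.request.eq[i].delay_multi = dmult;
+	}
+}
+#endif
+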
+struct lpfc_mbx_eq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_eq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_nop {
+	struct mbox_header header;
+	uint32_t context[2];
+};
+
+struct cq_context {
+	uint32_t word0;
+#define lpfc_cq_context_event_SHIFT	31
+#define lpfc_cq_context_event_MASK	0x00000001
+#define lpfc_cq_context_event_WORD	word0
+#define lpfc_cq_context_valid_SHIFT	29
+#define lpfc_cq_context_valid_MASK	0x00000001
+#define lpfc_cq_context_valid_WORD	word0
+#define lpfc_cq_context_count_SHIFT	27
+#define lpfc_cq_context_count_MASK	0x00000003
+#define lpfc_cq_context_count_WORD	word0
+#define LPFC_CQ_CNT_256		0x0
+#define LPFC_CQ_CNT_512		0x1
+#define LPFC_CQ_CNT_1024	0x2
+	uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT		22	/* Version 0 Only */
+#define lpfc_cq_eq_id_MASK		0x000000FF
+#define lpfc_cq_eq_id_WORD		word1
+#define lpfc_cq_eq_id_2_SHIFT		0 	/* Version 2 Only */
+#define lpfc_cq_eq_id_2_MASK		0x0000FFFF
+#define lpfc_cq_eq_id_2_WORD		word1
+	uint32_t reserved0;
+	uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_page_size_SHIFT	16	/* Version 2 Only */
+#define lpfc_mbx_cq_create_page_size_MASK	0x000000FF
+#define lpfc_mbx_cq_create_page_size_WORD	word0
+#define lpfc_mbx_cq_create_num_pages_SHIFT	0
+#define lpfc_mbx_cq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD	word0
+			struct cq_context context;
+			struct dma_address page[LPFC_MAX_CQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT	0
+#define lpfc_mbx_cq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_cq_create_set {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_set_page_size_SHIFT	16	/* Version 2 Only */
+#define lpfc_mbx_cq_create_set_page_size_MASK	0x000000FF
+#define lpfc_mbx_cq_create_set_page_size_WORD	word0
+#define lpfc_mbx_cq_create_set_num_pages_SHIFT	0
+#define lpfc_mbx_cq_create_set_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_num_pages_WORD	word0
+			uint32_t word1;
+#define lpfc_mbx_cq_create_set_evt_SHIFT	31
+#define lpfc_mbx_cq_create_set_evt_MASK		0x00000001
+#define lpfc_mbx_cq_create_set_evt_WORD		word1
+#define lpfc_mbx_cq_create_set_valid_SHIFT	29
+#define lpfc_mbx_cq_create_set_valid_MASK	0x00000001
+#define lpfc_mbx_cq_create_set_valid_WORD	word1
+#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT	27
+#define lpfc_mbx_cq_create_set_cqe_cnt_MASK	0x00000003
+#define lpfc_mbx_cq_create_set_cqe_cnt_WORD	word1
+#define lpfc_mbx_cq_create_set_cqe_size_SHIFT	25
+#define lpfc_mbx_cq_create_set_cqe_size_MASK	0x00000003
+#define lpfc_mbx_cq_create_set_cqe_size_WORD	word1
+#define lpfc_mbx_cq_create_set_auto_SHIFT	15
+#define lpfc_mbx_cq_create_set_auto_MASK	0x0000001
+#define lpfc_mbx_cq_create_set_auto_WORD	word1
+#define lpfc_mbx_cq_create_set_nodelay_SHIFT	14
+#define lpfc_mbx_cq_create_set_nodelay_MASK	0x00000001
+#define lpfc_mbx_cq_create_set_nodelay_WORD	word1
+#define lpfc_mbx_cq_create_set_clswm_SHIFT	12
+#define lpfc_mbx_cq_create_set_clswm_MASK	0x00000003
+#define lpfc_mbx_cq_create_set_clswm_WORD	word1
+			uint32_t word2;
+#define lpfc_mbx_cq_create_set_arm_SHIFT	31
+#define lpfc_mbx_cq_create_set_arm_MASK		0x00000001
+#define lpfc_mbx_cq_create_set_arm_WORD		word2
+#define lpfc_mbx_cq_create_set_num_cq_SHIFT	0
+#define lpfc_mbx_cq_create_set_num_cq_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_num_cq_WORD	word2
+			uint32_t word3;
+#define lpfc_mbx_cq_create_set_eq_id1_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id1_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id1_WORD	word3
+#define lpfc_mbx_cq_create_set_eq_id0_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id0_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id0_WORD	word3
+			uint32_t word4;
+#define lpfc_mbx_cq_create_set_eq_id3_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id3_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id3_WORD	word4
+#define lpfc_mbx_cq_create_set_eq_id2_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id2_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id2_WORD	word4
+			uint32_t word5;
+#define lpfc_mbx_cq_create_set_eq_id5_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id5_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id5_WORD	word5
+#define lpfc_mbx_cq_create_set_eq_id4_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id4_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id4_WORD	word5
+			uint32_t word6;
+#define lpfc_mbx_cq_create_set_eq_id7_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id7_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id7_WORD	word6
+#define lpfc_mbx_cq_create_set_eq_id6_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id6_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id6_WORD	word6
+			uint32_t word7;
+#define lpfc_mbx_cq_create_set_eq_id9_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id9_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id9_WORD	word7
+#define lpfc_mbx_cq_create_set_eq_id8_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id8_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id8_WORD	word7
+			uint32_t word8;
+#define lpfc_mbx_cq_create_set_eq_id11_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id11_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id11_WORD	word8
+#define lpfc_mbx_cq_create_set_eq_id10_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id10_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id10_WORD	word8
+			uint32_t word9;
+#define lpfc_mbx_cq_create_set_eq_id13_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id13_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id13_WORD	word9
+#define lpfc_mbx_cq_create_set_eq_id12_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id12_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id12_WORD	word9
+			uint32_t word10;
+#define lpfc_mbx_cq_create_set_eq_id15_SHIFT	16
+#define lpfc_mbx_cq_create_set_eq_id15_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id15_WORD	word10
+#define lpfc_mbx_cq_create_set_eq_id14_SHIFT	0
+#define lpfc_mbx_cq_create_set_eq_id14_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id14_WORD	word10
+			struct dma_address page[1];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_set_num_alloc_SHIFT	16
+#define lpfc_mbx_cq_create_set_num_alloc_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_num_alloc_WORD	word0
+#define lpfc_mbx_cq_create_set_base_id_SHIFT	0
+#define lpfc_mbx_cq_create_set_base_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_set_base_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_cq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_cq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct wq_context {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+	struct mbox_header header;
+	union {
+		struct {	/* Version 0 Request */
+			uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT	0
+#define lpfc_mbx_wq_create_num_pages_MASK	0x000000FF
+#define lpfc_mbx_wq_create_num_pages_WORD	word0
+#define lpfc_mbx_wq_create_dua_SHIFT		8
+#define lpfc_mbx_wq_create_dua_MASK		0x00000001
+#define lpfc_mbx_wq_create_dua_WORD		word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT		16
+#define lpfc_mbx_wq_create_cq_id_MASK		0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD		word0
+			struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
+			uint32_t word9;
+#define lpfc_mbx_wq_create_bua_SHIFT		0
+#define lpfc_mbx_wq_create_bua_MASK		0x00000001
+#define lpfc_mbx_wq_create_bua_WORD		word9
+#define lpfc_mbx_wq_create_ulp_num_SHIFT	8
+#define lpfc_mbx_wq_create_ulp_num_MASK		0x000000FF
+#define lpfc_mbx_wq_create_ulp_num_WORD		word9
+		} request;
+		struct {	/* Version 1 Request */
+			uint32_t word0;	/* Word 0 is the same as in v0 */
+			uint32_t word1;
+#define lpfc_mbx_wq_create_page_size_SHIFT	0
+#define lpfc_mbx_wq_create_page_size_MASK	0x000000FF
+#define lpfc_mbx_wq_create_page_size_WORD	word1
+#define LPFC_WQ_PAGE_SIZE_4096	0x1
+#define lpfc_mbx_wq_create_wqe_size_SHIFT	8
+#define lpfc_mbx_wq_create_wqe_size_MASK	0x0000000F
+#define lpfc_mbx_wq_create_wqe_size_WORD	word1
+#define LPFC_WQ_WQE_SIZE_64	0x5
+#define LPFC_WQ_WQE_SIZE_128	0x6
+#define lpfc_mbx_wq_create_wqe_count_SHIFT	16
+#define lpfc_mbx_wq_create_wqe_count_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_wqe_count_WORD	word1
+			uint32_t word2;
+			struct dma_address page[LPFC_MAX_WQ_PAGE-1];
+		} request_1;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT	0
+#define lpfc_mbx_wq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD	word0
+			uint32_t doorbell_offset;
+			uint32_t word2;
+#define lpfc_mbx_wq_create_bar_set_SHIFT	0
+#define lpfc_mbx_wq_create_bar_set_MASK		0x0000FFFF
+#define lpfc_mbx_wq_create_bar_set_WORD		word2
+#define WQ_PCI_BAR_0_AND_1	0x00
+#define WQ_PCI_BAR_2_AND_3	0x01
+#define WQ_PCI_BAR_4_AND_5	0x02
+#define lpfc_mbx_wq_create_db_format_SHIFT	16
+#define lpfc_mbx_wq_create_db_format_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_db_format_WORD	word2
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_wq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_wq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 2048
+#define LPFC_NVMET_DATA_BUF_SIZE 128
+struct rq_context {
+	uint32_t word0;
+#define lpfc_rq_context_rqe_count_SHIFT	16	/* Version 0 Only */
+#define lpfc_rq_context_rqe_count_MASK	0x0000000F
+#define lpfc_rq_context_rqe_count_WORD	word0
+#define LPFC_RQ_RING_SIZE_512		9	/* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024		10	/* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048		11	/* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096		12	/* 4096 entries */
+#define lpfc_rq_context_rqe_count_1_SHIFT	16	/* Version 1-2 Only */
+#define lpfc_rq_context_rqe_count_1_MASK	0x0000FFFF
+#define lpfc_rq_context_rqe_count_1_WORD	word0
+#define lpfc_rq_context_rqe_size_SHIFT	8		/* Version 1-2 Only */
+#define lpfc_rq_context_rqe_size_MASK	0x0000000F
+#define lpfc_rq_context_rqe_size_WORD	word0
+#define LPFC_RQE_SIZE_8		2
+#define LPFC_RQE_SIZE_16	3
+#define LPFC_RQE_SIZE_32	4
+#define LPFC_RQE_SIZE_64	5
+#define LPFC_RQE_SIZE_128	6
+#define lpfc_rq_context_page_size_SHIFT	0		/* Version 1 Only */
+#define lpfc_rq_context_page_size_MASK	0x000000FF
+#define lpfc_rq_context_page_size_WORD	word0
+#define	LPFC_RQ_PAGE_SIZE_4096	0x1
+	uint32_t word1;
+#define lpfc_rq_context_data_size_SHIFT	16		/* Version 2 Only */
+#define lpfc_rq_context_data_size_MASK	0x0000FFFF
+#define lpfc_rq_context_data_size_WORD	word1
+#define lpfc_rq_context_hdr_size_SHIFT	0		/* Version 2 Only */
+#define lpfc_rq_context_hdr_size_MASK	0x0000FFFF
+#define lpfc_rq_context_hdr_size_WORD	word1
+	uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT	16
+#define lpfc_rq_context_cq_id_MASK	0x000003FF
+#define lpfc_rq_context_cq_id_WORD	word2
+#define lpfc_rq_context_buf_size_SHIFT	0
+#define lpfc_rq_context_buf_size_MASK	0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD	word2
+#define lpfc_rq_context_base_cq_SHIFT	0		/* Version 2 Only */
+#define lpfc_rq_context_base_cq_MASK	0x0000FFFF
+#define lpfc_rq_context_base_cq_WORD	word2
+	uint32_t buffer_size;				/* Version 1 Only */
+};
+
+struct lpfc_mbx_rq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT	0
+#define lpfc_mbx_rq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD	word0
+#define lpfc_mbx_rq_create_dua_SHIFT		16
+#define lpfc_mbx_rq_create_dua_MASK		0x00000001
+#define lpfc_mbx_rq_create_dua_WORD		word0
+#define lpfc_mbx_rq_create_bqu_SHIFT		17
+#define lpfc_mbx_rq_create_bqu_MASK		0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD		word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT	24
+#define lpfc_mbx_rq_create_ulp_num_MASK		0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD		word0
+			struct rq_context context;
+			struct dma_address page[LPFC_MAX_RQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT	16
+#define lpfc_mbx_rq_create_q_cnt_v2_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_q_cnt_v2_WORD	word0
+#define lpfc_mbx_rq_create_q_id_SHIFT		0
+#define lpfc_mbx_rq_create_q_id_MASK		0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD		word0
+			uint32_t doorbell_offset;
+			uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT	0
+#define lpfc_mbx_rq_create_bar_set_MASK		0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD		word2
+#define lpfc_mbx_rq_create_db_format_SHIFT	16
+#define lpfc_mbx_rq_create_db_format_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD	word2
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_rq_create_v2 {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT	0
+#define lpfc_mbx_rq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD	word0
+#define lpfc_mbx_rq_create_rq_cnt_SHIFT		16
+#define lpfc_mbx_rq_create_rq_cnt_MASK		0x000000FF
+#define lpfc_mbx_rq_create_rq_cnt_WORD		word0
+#define lpfc_mbx_rq_create_dua_SHIFT		16
+#define lpfc_mbx_rq_create_dua_MASK		0x00000001
+#define lpfc_mbx_rq_create_dua_WORD		word0
+#define lpfc_mbx_rq_create_bqu_SHIFT		17
+#define lpfc_mbx_rq_create_bqu_MASK		0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD		word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT	24
+#define lpfc_mbx_rq_create_ulp_num_MASK		0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD		word0
+#define lpfc_mbx_rq_create_dim_SHIFT		29
+#define lpfc_mbx_rq_create_dim_MASK		0x00000001
+#define lpfc_mbx_rq_create_dim_WORD		word0
+#define lpfc_mbx_rq_create_dfd_SHIFT		30
+#define lpfc_mbx_rq_create_dfd_MASK		0x00000001
+#define lpfc_mbx_rq_create_dfd_WORD		word0
+#define lpfc_mbx_rq_create_dnb_SHIFT		31
+#define lpfc_mbx_rq_create_dnb_MASK		0x00000001
+#define lpfc_mbx_rq_create_dnb_WORD		word0
+			struct rq_context context;
+			struct dma_address page[1];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT	16
+#define lpfc_mbx_rq_create_q_cnt_v2_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_q_cnt_v2_WORD	word0
+#define lpfc_mbx_rq_create_q_id_SHIFT		0
+#define lpfc_mbx_rq_create_q_id_MASK		0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD		word0
+			uint32_t doorbell_offset;
+			uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT	0
+#define lpfc_mbx_rq_create_bar_set_MASK		0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD		word2
+#define lpfc_mbx_rq_create_db_format_SHIFT	16
+#define lpfc_mbx_rq_create_db_format_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD	word2
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_rq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_rq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct mq_context {
+	uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT	22 	/* Version 0 Only */
+#define lpfc_mq_context_cq_id_MASK	0x000003FF
+#define lpfc_mq_context_cq_id_WORD	word0
+#define lpfc_mq_context_ring_size_SHIFT	16
+#define lpfc_mq_context_ring_size_MASK	0x0000000F
+#define lpfc_mq_context_ring_size_WORD	word0
+#define LPFC_MQ_RING_SIZE_16		0x5
+#define LPFC_MQ_RING_SIZE_32		0x6
+#define LPFC_MQ_RING_SIZE_64		0x7
+#define LPFC_MQ_RING_SIZE_128		0x8
+	uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT	31
+#define lpfc_mq_context_valid_MASK	0x00000001
+#define lpfc_mq_context_valid_WORD	word1
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD	word0
+			struct mq_context context;
+			struct dma_address page[LPFC_MAX_MQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT	0
+#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_mq_create_ext {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD	word0
+#define lpfc_mbx_mq_create_ext_cq_id_SHIFT	16	/* Version 1 Only */
+#define lpfc_mbx_mq_create_ext_cq_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_ext_cq_id_WORD	word0
+			uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT	LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD	async_evt_bmap
+#define LPFC_EVT_CODE_LINK_NO_LINK	0x0
+#define LPFC_EVT_CODE_LINK_10_MBIT	0x1
+#define LPFC_EVT_CODE_LINK_100_MBIT	0x2
+#define LPFC_EVT_CODE_LINK_1_GBIT	0x3
+#define LPFC_EVT_CODE_LINK_10_GBIT	0x4
+#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT	LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT	LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT	LPFC_TRAILER_CODE_FC
+#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD	async_evt_bmap
+#define LPFC_EVT_CODE_FC_NO_LINK	0x0
+#define LPFC_EVT_CODE_FC_1_GBAUD	0x1
+#define LPFC_EVT_CODE_FC_2_GBAUD	0x2
+#define LPFC_EVT_CODE_FC_4_GBAUD	0x4
+#define LPFC_EVT_CODE_FC_8_GBAUD	0x8
+#define LPFC_EVT_CODE_FC_10_GBAUD	0xA
+#define LPFC_EVT_CODE_FC_16_GBAUD	0x10
+#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT	LPFC_TRAILER_CODE_SLI
+#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD	async_evt_bmap
+			struct mq_context context;
+			struct dma_address page[LPFC_MAX_MQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT	0
+#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD	word0
+		} response;
+	} u;
+#define LPFC_ASYNC_EVENT_LINK_STATE	0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE	0x4
+#define LPFC_ASYNC_EVENT_GROUP5		0x20
+};
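+
+/*
+ * Usage sketch (illustrative addition): async_evt_bmap selects which
+ * asynchronous event trailer codes the port may deliver through this MQ,
+ * with each bit position taken from the matching LPFC_TRAILER_CODE_*
+ * value; "mq_ext" is a hypothetical pointer to this structure:
+ *
+ *	bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_ext->u.request, 1);
+ *	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, &mq_ext->u.request, 1);
+ */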
+
+struct lpfc_mbx_mq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_mq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI	0x20
+#define LPFC_RSC_TYPE_FCOE_VPI	0x21
+#define LPFC_RSC_TYPE_FCOE_RPI	0x22
+#define LPFC_RSC_TYPE_FCOE_XRI	0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT	0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD		word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT		0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD		word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT	16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD		word4
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_query_fw_config {
+	struct mbox_header header;
+	struct {
+		uint32_t config_number;
+#define	LPFC_FC_FCOE		0x00000007
+		uint32_t asic_revision;
+		uint32_t physical_port;
+		uint32_t function_mode;
+#define LPFC_FCOE_INI_MODE	0x00000040
+#define LPFC_FCOE_TGT_MODE	0x00000080
+#define LPFC_DUA_MODE		0x00000800
+		uint32_t ulp0_mode;
+#define LPFC_ULP_FCOE_INIT_MODE	0x00000040
+#define LPFC_ULP_FCOE_TGT_MODE	0x00000080
+		uint32_t ulp0_nap_words[12];
+		uint32_t ulp1_mode;
+		uint32_t ulp1_nap_words[12];
+		uint32_t function_capabilities;
+		uint32_t cqid_base;
+		uint32_t cqid_tot;
+		uint32_t eqid_base;
+		uint32_t eqid_tot;
+		uint32_t ulp0_nap2_words[2];
+		uint32_t ulp1_nap2_words[2];
+	} rsp;
+};
+
+struct lpfc_mbx_set_beacon_config {
+	struct mbox_header header;
+	uint32_t word4;
+#define lpfc_mbx_set_beacon_port_num_SHIFT		0
+#define lpfc_mbx_set_beacon_port_num_MASK		0x0000003F
+#define lpfc_mbx_set_beacon_port_num_WORD		word4
+#define lpfc_mbx_set_beacon_port_type_SHIFT		6
+#define lpfc_mbx_set_beacon_port_type_MASK		0x00000003
+#define lpfc_mbx_set_beacon_port_type_WORD		word4
+#define lpfc_mbx_set_beacon_state_SHIFT			8
+#define lpfc_mbx_set_beacon_state_MASK			0x000000FF
+#define lpfc_mbx_set_beacon_state_WORD			word4
+#define lpfc_mbx_set_beacon_duration_SHIFT		16
+#define lpfc_mbx_set_beacon_duration_MASK		0x000000FF
+#define lpfc_mbx_set_beacon_duration_WORD		word4
+#define lpfc_mbx_set_beacon_status_duration_SHIFT	24
+#define lpfc_mbx_set_beacon_status_duration_MASK	0x000000FF
+#define lpfc_mbx_set_beacon_status_duration_WORD	word4
+};
+
+struct lpfc_id_range {
+	uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT	0
+#define lpfc_mbx_rsrc_id_word4_0_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD	word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT	16
+#define lpfc_mbx_rsrc_id_word4_1_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD	word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT	0
+#define lpfc_mbx_set_diag_state_diag_MASK	0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD	word0
+#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT	2
+#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK	0x00000001
+#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD	word0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE	0
+#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE		1
+#define lpfc_mbx_set_diag_state_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_state_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD	word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
+#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD	word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES		0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT	22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT	16
+#define lpfc_mbx_run_diag_test_link_num_MASK	0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD	word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT	22
+#define lpfc_mbx_run_diag_test_link_type_MASK	0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD	word0
+			uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_id_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD	word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT	16
+#define lpfc_mbx_run_diag_test_loops_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD	word1
+			uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_ver_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD	word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT	16
+#define lpfc_mbx_run_diag_test_err_act_MASK	0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD	word2
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mailbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * left for the extents payload.
+ *
+ * 212 bytes / 2 bytes per extent = 106 extents.
+ * 106 extents / 2 extents per word = 53 words.
+ * The lpfc_id_range id array is therefore statically sized to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges.  For ALLOC, both the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD	word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT	16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD	word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT	0
+#define lpfc_mbx_rsrc_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD	word4
+			struct lpfc_id_range id[53];
+		} rsp;
+	} u;
+};
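+
+/*
+ * Illustrative addition (not in the original patch): the 53-word sizing
+ * derived above can be sanity-checked at compile time from a .c file with
+ * the kernel's BUILD_BUG_ON() from <linux/bug.h>:
+ *
+ *	BUILD_BUG_ON(sizeof(struct lpfc_id_range) != sizeof(uint32_t));
+ *	BUILD_BUG_ON(53 * sizeof(struct lpfc_id_range) != 212);
+ */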
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in
+ * this structure reuses the SHIFT/MASK/WORD defines already given for word4
+ * of the embedded lpfc_mbx_alloc_rsrc_extents structure above.  This
+ * non-embedded structure allows for the maximum number of extents
+ * supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word4;
+	struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+	struct mbox_header header;
+	struct {
+		uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK		0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD		word4
+	} req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
+struct lpfc_mbx_post_hdr_tmpl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT  0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK   0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD   word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT   16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK    0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD    word10
+	uint32_t rpi_paddr_lo;
+	uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge {	/* SLI-4 */
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+
+	uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT	0
+#define lpfc_sli4_sge_offset_MASK	0x07FFFFFF
+#define lpfc_sli4_sge_offset_WORD	word2
+#define lpfc_sli4_sge_type_SHIFT	27
+#define lpfc_sli4_sge_type_MASK		0x0000000F
+#define lpfc_sli4_sge_type_WORD		word2
+#define LPFC_SGE_TYPE_DATA		0x0
+#define LPFC_SGE_TYPE_DIF		0x4
+#define LPFC_SGE_TYPE_LSP		0x5
+#define LPFC_SGE_TYPE_PEDIF		0x6
+#define LPFC_SGE_TYPE_PESEED		0x7
+#define LPFC_SGE_TYPE_DISEED		0x8
+#define LPFC_SGE_TYPE_ENC		0x9
+#define LPFC_SGE_TYPE_ATM		0xA
+#define LPFC_SGE_TYPE_SKIP		0xC
+#define lpfc_sli4_sge_last_SHIFT	31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_last_MASK		0x00000001
+#define lpfc_sli4_sge_last_WORD		word2
+	uint32_t sge_len;
+};
+
+struct sli4_sge_diseed {	/* SLI-4 */
+	uint32_t ref_tag;
+	uint32_t ref_tag_tran;
+
+	uint32_t word2;
+#define lpfc_sli4_sge_dif_apptran_SHIFT	0
+#define lpfc_sli4_sge_dif_apptran_MASK	0x0000FFFF
+#define lpfc_sli4_sge_dif_apptran_WORD	word2
+#define lpfc_sli4_sge_dif_af_SHIFT	24
+#define lpfc_sli4_sge_dif_af_MASK	0x00000001
+#define lpfc_sli4_sge_dif_af_WORD	word2
+#define lpfc_sli4_sge_dif_na_SHIFT	25
+#define lpfc_sli4_sge_dif_na_MASK	0x00000001
+#define lpfc_sli4_sge_dif_na_WORD	word2
+#define lpfc_sli4_sge_dif_hi_SHIFT	26
+#define lpfc_sli4_sge_dif_hi_MASK	0x00000001
+#define lpfc_sli4_sge_dif_hi_WORD	word2
+#define lpfc_sli4_sge_dif_type_SHIFT	27
+#define lpfc_sli4_sge_dif_type_MASK	0x0000000F
+#define lpfc_sli4_sge_dif_type_WORD	word2
+#define lpfc_sli4_sge_dif_last_SHIFT	31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_dif_last_MASK	0x00000001
+#define lpfc_sli4_sge_dif_last_WORD	word2
+	uint32_t word3;
+#define lpfc_sli4_sge_dif_apptag_SHIFT	0
+#define lpfc_sli4_sge_dif_apptag_MASK	0x0000FFFF
+#define lpfc_sli4_sge_dif_apptag_WORD	word3
+#define lpfc_sli4_sge_dif_bs_SHIFT	16
+#define lpfc_sli4_sge_dif_bs_MASK	0x00000007
+#define lpfc_sli4_sge_dif_bs_WORD	word3
+#define lpfc_sli4_sge_dif_ai_SHIFT	19
+#define lpfc_sli4_sge_dif_ai_MASK	0x00000001
+#define lpfc_sli4_sge_dif_ai_WORD	word3
+#define lpfc_sli4_sge_dif_me_SHIFT	20
+#define lpfc_sli4_sge_dif_me_MASK	0x00000001
+#define lpfc_sli4_sge_dif_me_WORD	word3
+#define lpfc_sli4_sge_dif_re_SHIFT	21
+#define lpfc_sli4_sge_dif_re_MASK	0x00000001
+#define lpfc_sli4_sge_dif_re_WORD	word3
+#define lpfc_sli4_sge_dif_ce_SHIFT	22
+#define lpfc_sli4_sge_dif_ce_MASK	0x00000001
+#define lpfc_sli4_sge_dif_ce_WORD	word3
+#define lpfc_sli4_sge_dif_nr_SHIFT	23
+#define lpfc_sli4_sge_dif_nr_MASK	0x00000001
+#define lpfc_sli4_sge_dif_nr_WORD	word3
+#define lpfc_sli4_sge_dif_oprx_SHIFT	24
+#define lpfc_sli4_sge_dif_oprx_MASK	0x0000000F
+#define lpfc_sli4_sge_dif_oprx_WORD	word3
+#define lpfc_sli4_sge_dif_optx_SHIFT	28
+#define lpfc_sli4_sge_dif_optx_MASK	0x0000000F
+#define lpfc_sli4_sge_dif_optx_WORD	word3
+/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */
+};
+
+struct fcf_record {
+	uint32_t max_rcv_size;
+	uint32_t fka_adv_period;
+	uint32_t fip_priority;
+	uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT		0
+#define lpfc_fcf_record_mac_0_MASK		0x000000FF
+#define lpfc_fcf_record_mac_0_WORD		word3
+#define lpfc_fcf_record_mac_1_SHIFT		8
+#define lpfc_fcf_record_mac_1_MASK		0x000000FF
+#define lpfc_fcf_record_mac_1_WORD		word3
+#define lpfc_fcf_record_mac_2_SHIFT		16
+#define lpfc_fcf_record_mac_2_MASK		0x000000FF
+#define lpfc_fcf_record_mac_2_WORD		word3
+#define lpfc_fcf_record_mac_3_SHIFT		24
+#define lpfc_fcf_record_mac_3_MASK		0x000000FF
+#define lpfc_fcf_record_mac_3_WORD		word3
+	uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT		0
+#define lpfc_fcf_record_mac_4_MASK		0x000000FF
+#define lpfc_fcf_record_mac_4_WORD		word4
+#define lpfc_fcf_record_mac_5_SHIFT		8
+#define lpfc_fcf_record_mac_5_MASK		0x000000FF
+#define lpfc_fcf_record_mac_5_WORD		word4
+#define lpfc_fcf_record_fcf_avail_SHIFT		16
+#define lpfc_fcf_record_fcf_avail_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_avail_WORD		word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT	24
+#define lpfc_fcf_record_mac_addr_prov_MASK	0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD	word4
+#define LPFC_FCF_FPMA           1 	/* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA           2       /* Server Provided MAC Address */
+	uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT	0
+#define lpfc_fcf_record_fab_name_0_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD		word5
+#define lpfc_fcf_record_fab_name_1_SHIFT	8
+#define lpfc_fcf_record_fab_name_1_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD		word5
+#define lpfc_fcf_record_fab_name_2_SHIFT	16
+#define lpfc_fcf_record_fab_name_2_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD		word5
+#define lpfc_fcf_record_fab_name_3_SHIFT	24
+#define lpfc_fcf_record_fab_name_3_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD		word5
+	uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT	0
+#define lpfc_fcf_record_fab_name_4_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD		word6
+#define lpfc_fcf_record_fab_name_5_SHIFT	8
+#define lpfc_fcf_record_fab_name_5_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD		word6
+#define lpfc_fcf_record_fab_name_6_SHIFT	16
+#define lpfc_fcf_record_fab_name_6_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD		word6
+#define lpfc_fcf_record_fab_name_7_SHIFT	24
+#define lpfc_fcf_record_fab_name_7_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD		word6
+	uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT		0
+#define lpfc_fcf_record_fc_map_0_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD		word7
+#define lpfc_fcf_record_fc_map_1_SHIFT		8
+#define lpfc_fcf_record_fc_map_1_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD		word7
+#define lpfc_fcf_record_fc_map_2_SHIFT		16
+#define lpfc_fcf_record_fc_map_2_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD		word7
+#define lpfc_fcf_record_fcf_valid_SHIFT		24
+#define lpfc_fcf_record_fcf_valid_MASK		0x00000001
+#define lpfc_fcf_record_fcf_valid_WORD		word7
+#define lpfc_fcf_record_fcf_fc_SHIFT		25
+#define lpfc_fcf_record_fcf_fc_MASK		0x00000001
+#define lpfc_fcf_record_fcf_fc_WORD		word7
+#define lpfc_fcf_record_fcf_sol_SHIFT		31
+#define lpfc_fcf_record_fcf_sol_MASK		0x00000001
+#define lpfc_fcf_record_fcf_sol_WORD		word7
+	uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT		0
+#define lpfc_fcf_record_fcf_index_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD		word8
+#define lpfc_fcf_record_fcf_state_SHIFT		16
+#define lpfc_fcf_record_fcf_state_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD		word8
+	uint8_t vlan_bitmap[512];
+	uint32_t word137;
+#define lpfc_fcf_record_switch_name_0_SHIFT	0
+#define lpfc_fcf_record_switch_name_0_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_0_WORD	word137
+#define lpfc_fcf_record_switch_name_1_SHIFT	8
+#define lpfc_fcf_record_switch_name_1_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_1_WORD	word137
+#define lpfc_fcf_record_switch_name_2_SHIFT	16
+#define lpfc_fcf_record_switch_name_2_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_2_WORD	word137
+#define lpfc_fcf_record_switch_name_3_SHIFT	24
+#define lpfc_fcf_record_switch_name_3_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_3_WORD	word137
+	uint32_t word138;
+#define lpfc_fcf_record_switch_name_4_SHIFT	0
+#define lpfc_fcf_record_switch_name_4_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_4_WORD	word138
+#define lpfc_fcf_record_switch_name_5_SHIFT	8
+#define lpfc_fcf_record_switch_name_5_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_5_WORD	word138
+#define lpfc_fcf_record_switch_name_6_SHIFT	16
+#define lpfc_fcf_record_switch_name_6_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_6_WORD	word138
+#define lpfc_fcf_record_switch_name_7_SHIFT	24
+#define lpfc_fcf_record_switch_name_7_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_7_WORD	word138
+};
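+
+/*
+ * Usage sketch (illustrative addition): the FCF MAC address is stored one
+ * byte per 8-bit field and is reassembled with bf_get(); "fcf_rec" is a
+ * hypothetical pointer to this record:
+ *
+ *	mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf_rec);
+ *	...
+ *	mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf_rec);
+ */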
+
+struct lpfc_mbx_read_fcf_tbl {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK		0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD		word10
+		} request;
+		struct {
+			uint32_t eventag;
+		} response;
+	} u;
+	uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK	0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD	word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT        0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK         0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD         word10
+	struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT	0
+#define lpfc_mbx_del_fcf_tbl_count_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD		word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT	16
+#define lpfc_mbx_del_fcf_tbl_index_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD		word10
+};
+
+struct lpfc_mbx_redisc_fcf_tbl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_redisc_fcf_count_SHIFT		0
+#define lpfc_mbx_redisc_fcf_count_MASK		0x0000FFFF
+#define lpfc_mbx_redisc_fcf_count_WORD		word10
+	uint32_t resvd;
+	uint32_t word12;
+#define lpfc_mbx_redisc_fcf_index_SHIFT		0
+#define lpfc_mbx_redisc_fcf_index_MASK		0x0000FFFF
+#define lpfc_mbx_redisc_fcf_index_WORD		word12
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS					0x0
+#define STATUS_FAILED 					0x1
+#define STATUS_ILLEGAL_REQUEST				0x2
+#define STATUS_ILLEGAL_FIELD				0x3
+#define STATUS_INSUFFICIENT_BUFFER 			0x4
+#define STATUS_UNAUTHORIZED_REQUEST			0x5
+#define STATUS_FLASHROM_SAVE_FAILED			0x17
+#define STATUS_FLASHROM_RESTORE_FAILED			0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED			0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED 		0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM		0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM		0x1d
+#define STATUS_ASSERT_FAILED				0x1e
+#define STATUS_INVALID_SESSION				0x1f
+#define STATUS_INVALID_CONNECTION			0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT		0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH			0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID			0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND			0x26
+#define STATUS_FLASHROM_READ_FAILED			0x27
+#define STATUS_POLL_IOCTL_TIMEOUT			0x28
+#define STATUS_ERROR_ACITMAIN				0x2a
+#define STATUS_REBOOT_REQUIRED				0x2c
+#define STATUS_FCF_IN_USE				0x3a
+#define STATUS_FCF_TABLE_EMPTY				0x43
+
+/*
+ * Additional status field for embedded SLI_CONFIG mailbox
+ * command.
+ */
+#define ADD_STATUS_OPERATION_ALREADY_ACTIVE		0x67
+
+struct lpfc_mbx_sli4_config {
+	struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+	uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT		31
+#define lpfc_init_vfi_vr_MASK		0x00000001
+#define lpfc_init_vfi_vr_WORD		word1
+#define lpfc_init_vfi_vt_SHIFT		30
+#define lpfc_init_vfi_vt_MASK		0x00000001
+#define lpfc_init_vfi_vt_WORD		word1
+#define lpfc_init_vfi_vf_SHIFT		29
+#define lpfc_init_vfi_vf_MASK		0x00000001
+#define lpfc_init_vfi_vf_WORD		word1
+#define lpfc_init_vfi_vp_SHIFT		28
+#define lpfc_init_vfi_vp_MASK		0x00000001
+#define lpfc_init_vfi_vp_WORD		word1
+#define lpfc_init_vfi_vfi_SHIFT		0
+#define lpfc_init_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_init_vfi_vpi_SHIFT		16
+#define lpfc_init_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vpi_WORD		word2
+#define lpfc_init_vfi_fcfi_SHIFT	0
+#define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD		word2
+	uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT		13
+#define lpfc_init_vfi_pri_MASK		0x00000007
+#define lpfc_init_vfi_pri_WORD		word3
+#define lpfc_init_vfi_vf_id_SHIFT	1
+#define lpfc_init_vfi_vf_id_MASK	0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD	word3
+	uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT	24
+#define lpfc_init_vfi_hop_count_MASK	0x000000FF
+#define lpfc_init_vfi_hop_count_WORD	word4
+};
+#define MBX_VFI_IN_USE			0x9F02
+
+
+struct lpfc_mbx_reg_vfi {
+	uint32_t word1;
+#define lpfc_reg_vfi_upd_SHIFT		29
+#define lpfc_reg_vfi_upd_MASK		0x00000001
+#define lpfc_reg_vfi_upd_WORD		word1
+#define lpfc_reg_vfi_vp_SHIFT		28
+#define lpfc_reg_vfi_vp_MASK		0x00000001
+#define lpfc_reg_vfi_vp_WORD		word1
+#define lpfc_reg_vfi_vfi_SHIFT		0
+#define lpfc_reg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT		16
+#define lpfc_reg_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD		word2
+#define lpfc_reg_vfi_fcfi_SHIFT		0
+#define lpfc_reg_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD		word2
+	uint32_t wwn[2];
+	struct ulp_bde64 bde;
+	uint32_t e_d_tov;
+	uint32_t r_a_tov;
+	uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT	0
+#define lpfc_reg_vfi_nport_id_MASK	0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD	word10
+#define lpfc_reg_vfi_bbcr_SHIFT		27
+#define lpfc_reg_vfi_bbcr_MASK		0x00000001
+#define lpfc_reg_vfi_bbcr_WORD		word10
+#define lpfc_reg_vfi_bbscn_SHIFT	28
+#define lpfc_reg_vfi_bbscn_MASK		0x0000000F
+#define lpfc_reg_vfi_bbscn_WORD		word10
+};
+
+struct lpfc_mbx_init_vpi {
+	uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT		16
+#define lpfc_init_vpi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD		word1
+#define lpfc_init_vpi_vpi_SHIFT		0
+#define lpfc_init_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD		word1
+};
+
+struct lpfc_mbx_read_vpi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT	0
+#define lpfc_mbx_read_vpi_vnportid_MASK		0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD		word2
+	uint32_t word3_rsvd;
+	uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT	0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD		word4
+#define lpfc_mbx_read_vpi_pb_SHIFT		15
+#define lpfc_mbx_read_vpi_pb_MASK		0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD		word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT	16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK	0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD	word4
+#define lpfc_mbx_read_vpi_ns_SHIFT		30
+#define lpfc_mbx_read_vpi_ns_MASK		0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD		word4
+#define lpfc_mbx_read_vpi_hl_SHIFT		31
+#define lpfc_mbx_read_vpi_hl_MASK		0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD		word4
+	uint32_t word5_rsvd;
+	uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT		0
+#define lpfc_mbx_read_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD		word6
+	uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_0_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD		word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_1_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD		word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT		16
+#define lpfc_mbx_read_vpi_mac_2_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD		word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT		24
+#define lpfc_mbx_read_vpi_mac_3_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_4_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD		word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_5_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD		word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT	16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK		0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD		word8
+#define lpfc_mbx_read_vpi_vv_SHIFT		28
+#define lpfc_mbx_read_vpi_vv_MASK		0x00000001
+#define lpfc_mbx_read_vpi_vv_WORD		word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT	0
+#define lpfc_unreg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD		word2
+};
+
+struct lpfc_mbx_resume_rpi {
+	uint32_t word1;
+#define lpfc_resume_rpi_index_SHIFT	0
+#define lpfc_resume_rpi_index_MASK	0x0000FFFF
+#define lpfc_resume_rpi_index_WORD	word1
+#define lpfc_resume_rpi_ii_SHIFT	30
+#define lpfc_resume_rpi_ii_MASK		0x00000003
+#define lpfc_resume_rpi_ii_WORD		word1
+#define RESUME_INDEX_RPI		0
+#define RESUME_INDEX_VPI		1
+#define RESUME_INDEX_VFI		2
+#define RESUME_INDEX_FCFI		3
+	uint32_t event_tag;
+};
+
+#define REG_FCF_INVALID_QID	0xFFFF
+struct lpfc_mbx_reg_fcfi {
+	uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT	0
+#define lpfc_reg_fcfi_info_index_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD	word1
+#define lpfc_reg_fcfi_fcfi_SHIFT	16
+#define lpfc_reg_fcfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT	0
+#define lpfc_reg_fcfi_rq_id1_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD	word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT	16
+#define lpfc_reg_fcfi_rq_id0_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD	word2
+	uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT	0
+#define lpfc_reg_fcfi_rq_id3_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD	word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT	16
+#define lpfc_reg_fcfi_rq_id2_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD	word3
+	uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT	24
+#define lpfc_reg_fcfi_type_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD	word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT	16
+#define lpfc_reg_fcfi_type_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD	word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD	word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD	word4
+	uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT	24
+#define lpfc_reg_fcfi_type_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD	word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT	16
+#define lpfc_reg_fcfi_type_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD	word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD	word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD	word5
+	uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT	24
+#define lpfc_reg_fcfi_type_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD	word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT	16
+#define lpfc_reg_fcfi_type_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD	word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD	word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD	word6
+	uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT	24
+#define lpfc_reg_fcfi_type_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD	word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT	16
+#define lpfc_reg_fcfi_type_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD	word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD	word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD	word7
+	uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT		13
+#define lpfc_reg_fcfi_mam_MASK		0x00000003
+#define lpfc_reg_fcfi_mam_WORD		word8
+#define LPFC_MAM_BOTH		0	/* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA		1	/* Server Provided MAC Address */
+#define LPFC_MAM_FPMA		2	/* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT		12
+#define lpfc_reg_fcfi_vv_MASK		0x00000001
+#define lpfc_reg_fcfi_vv_WORD		word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT	0
+#define lpfc_reg_fcfi_vlan_tag_MASK	0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD	word8
+};
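+
+/*
+ * Usage sketch (illustrative addition): rq_id0..rq_id3 pair with the
+ * rctl/type match/mask sets above; slots that carry no RQ are
+ * conventionally filled with REG_FCF_INVALID_QID rather than 0.
+ * "reg_fcfi" is a hypothetical pointer to this structure:
+ *
+ *	bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, rq_id);
+ *	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+ */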
+
+struct lpfc_mbx_reg_fcfi_mrq {
+	uint32_t word1;
+#define lpfc_reg_fcfi_mrq_info_index_SHIFT	0
+#define lpfc_reg_fcfi_mrq_info_index_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_mrq_info_index_WORD	word1
+#define lpfc_reg_fcfi_mrq_fcfi_SHIFT		16
+#define lpfc_reg_fcfi_mrq_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_mrq_fcfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_fcfi_mrq_rq_id1_SHIFT		0
+#define lpfc_reg_fcfi_mrq_rq_id1_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id1_WORD		word2
+#define lpfc_reg_fcfi_mrq_rq_id0_SHIFT		16
+#define lpfc_reg_fcfi_mrq_rq_id0_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id0_WORD		word2
+	uint32_t word3;
+#define lpfc_reg_fcfi_mrq_rq_id3_SHIFT		0
+#define lpfc_reg_fcfi_mrq_rq_id3_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id3_WORD		word3
+#define lpfc_reg_fcfi_mrq_rq_id2_SHIFT		16
+#define lpfc_reg_fcfi_mrq_rq_id2_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id2_WORD		word3
+	uint32_t word4;
+#define lpfc_reg_fcfi_mrq_type_match0_SHIFT	24
+#define lpfc_reg_fcfi_mrq_type_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match0_WORD	word4
+#define lpfc_reg_fcfi_mrq_type_mask0_SHIFT	16
+#define lpfc_reg_fcfi_mrq_type_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask0_WORD	word4
+#define lpfc_reg_fcfi_mrq_rctl_match0_SHIFT	8
+#define lpfc_reg_fcfi_mrq_rctl_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match0_WORD	word4
+#define lpfc_reg_fcfi_mrq_rctl_mask0_SHIFT	0
+#define lpfc_reg_fcfi_mrq_rctl_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask0_WORD	word4
+	uint32_t word5;
+#define lpfc_reg_fcfi_mrq_type_match1_SHIFT	24
+#define lpfc_reg_fcfi_mrq_type_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match1_WORD	word5
+#define lpfc_reg_fcfi_mrq_type_mask1_SHIFT	16
+#define lpfc_reg_fcfi_mrq_type_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask1_WORD	word5
+#define lpfc_reg_fcfi_mrq_rctl_match1_SHIFT	8
+#define lpfc_reg_fcfi_mrq_rctl_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match1_WORD	word5
+#define lpfc_reg_fcfi_mrq_rctl_mask1_SHIFT	0
+#define lpfc_reg_fcfi_mrq_rctl_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask1_WORD	word5
+	uint32_t word6;
+#define lpfc_reg_fcfi_mrq_type_match2_SHIFT	24
+#define lpfc_reg_fcfi_mrq_type_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match2_WORD	word6
+#define lpfc_reg_fcfi_mrq_type_mask2_SHIFT	16
+#define lpfc_reg_fcfi_mrq_type_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask2_WORD	word6
+#define lpfc_reg_fcfi_mrq_rctl_match2_SHIFT	8
+#define lpfc_reg_fcfi_mrq_rctl_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match2_WORD	word6
+#define lpfc_reg_fcfi_mrq_rctl_mask2_SHIFT	0
+#define lpfc_reg_fcfi_mrq_rctl_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask2_WORD	word6
+	uint32_t word7;
+#define lpfc_reg_fcfi_mrq_type_match3_SHIFT	24
+#define lpfc_reg_fcfi_mrq_type_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match3_WORD	word7
+#define lpfc_reg_fcfi_mrq_type_mask3_SHIFT	16
+#define lpfc_reg_fcfi_mrq_type_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask3_WORD	word7
+#define lpfc_reg_fcfi_mrq_rctl_match3_SHIFT	8
+#define lpfc_reg_fcfi_mrq_rctl_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match3_WORD	word7
+#define lpfc_reg_fcfi_mrq_rctl_mask3_SHIFT	0
+#define lpfc_reg_fcfi_mrq_rctl_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask3_WORD	word7
+	uint32_t word8;
+#define lpfc_reg_fcfi_mrq_ptc7_SHIFT		31
+#define lpfc_reg_fcfi_mrq_ptc7_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc7_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc6_SHIFT		30
+#define lpfc_reg_fcfi_mrq_ptc6_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc6_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc5_SHIFT		29
+#define lpfc_reg_fcfi_mrq_ptc5_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc5_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc4_SHIFT		28
+#define lpfc_reg_fcfi_mrq_ptc4_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc4_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc3_SHIFT		27
+#define lpfc_reg_fcfi_mrq_ptc3_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc3_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc2_SHIFT		26
+#define lpfc_reg_fcfi_mrq_ptc2_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc2_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc1_SHIFT		25
+#define lpfc_reg_fcfi_mrq_ptc1_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc1_WORD		word8
+#define lpfc_reg_fcfi_mrq_ptc0_SHIFT		24
+#define lpfc_reg_fcfi_mrq_ptc0_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_ptc0_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt7_SHIFT		23
+#define lpfc_reg_fcfi_mrq_pt7_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt7_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt6_SHIFT		22
+#define lpfc_reg_fcfi_mrq_pt6_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt6_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt5_SHIFT		21
+#define lpfc_reg_fcfi_mrq_pt5_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt5_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt4_SHIFT		20
+#define lpfc_reg_fcfi_mrq_pt4_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt4_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt3_SHIFT		19
+#define lpfc_reg_fcfi_mrq_pt3_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt3_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt2_SHIFT		18
+#define lpfc_reg_fcfi_mrq_pt2_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt2_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt1_SHIFT		17
+#define lpfc_reg_fcfi_mrq_pt1_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt1_WORD		word8
+#define lpfc_reg_fcfi_mrq_pt0_SHIFT		16
+#define lpfc_reg_fcfi_mrq_pt0_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_pt0_WORD		word8
+#define lpfc_reg_fcfi_mrq_xmv_SHIFT		15
+#define lpfc_reg_fcfi_mrq_xmv_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_xmv_WORD		word8
+#define lpfc_reg_fcfi_mrq_mode_SHIFT		13
+#define lpfc_reg_fcfi_mrq_mode_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_mode_WORD		word8
+#define lpfc_reg_fcfi_mrq_vv_SHIFT		12
+#define lpfc_reg_fcfi_mrq_vv_MASK		0x00000001
+#define lpfc_reg_fcfi_mrq_vv_WORD		word8
+#define lpfc_reg_fcfi_mrq_vlan_tag_SHIFT	0
+#define lpfc_reg_fcfi_mrq_vlan_tag_MASK		0x00000FFF
+#define lpfc_reg_fcfi_mrq_vlan_tag_WORD		word8
+	uint32_t word9;
+#define lpfc_reg_fcfi_mrq_policy_SHIFT		12
+#define lpfc_reg_fcfi_mrq_policy_MASK		0x0000000F
+#define lpfc_reg_fcfi_mrq_policy_WORD		word9
+#define lpfc_reg_fcfi_mrq_filter_SHIFT		8
+#define lpfc_reg_fcfi_mrq_filter_MASK		0x0000000F
+#define lpfc_reg_fcfi_mrq_filter_WORD		word9
+#define lpfc_reg_fcfi_mrq_npairs_SHIFT		0
+#define lpfc_reg_fcfi_mrq_npairs_MASK		0x000000FF
+#define lpfc_reg_fcfi_mrq_npairs_WORD		word9
+	uint32_t word10;
+	uint32_t word11;
+	uint32_t word12;
+	uint32_t word13;
+	uint32_t word14;
+	uint32_t word15;
+	uint32_t word16;
+};
+
+struct lpfc_mbx_unreg_fcfi {
+	uint32_t word1_rsv;
+	uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT		0
+#define lpfc_unreg_fcfi_MASK		0x0000FFFF
+#define lpfc_unreg_fcfi_WORD		word2
+};
+
+struct lpfc_mbx_read_rev {
+	uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT  		16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK   		0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD   		word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT		20
+#define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD		word1
+#define lpfc_mbx_rd_rev_cee_ver_SHIFT		21
+#define lpfc_mbx_rd_rev_cee_ver_MASK		0x00000003
+#define lpfc_mbx_rd_rev_cee_ver_WORD		word1
+#define LPFC_PREDCBX_CEE_MODE	0
+#define LPFC_DCBX_CEE_MODE	1
+#define lpfc_mbx_rd_rev_vpd_SHIFT		29
+#define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD		word1
+	uint32_t first_hw_rev;
+	uint32_t second_hw_rev;
+	uint32_t word4_rsvd;
+	uint32_t third_hw_rev;
+	uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT		0
+#define lpfc_mbx_rd_rev_fcph_low_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD		word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT		8
+#define lpfc_mbx_rd_rev_fcph_high_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD		word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT	16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD	word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT	24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD	word6
+	uint32_t word7_rsvd;
+	uint32_t fw_id_rev;
+	uint8_t  fw_name[16];
+	uint32_t ulp_fw_id_rev;
+	uint8_t  ulp_fw_name[16];
+	uint32_t word18_47_rsvd[30];
+	uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT		0
+#define lpfc_mbx_rd_rev_avail_len_MASK		0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD		word48
+	uint32_t vpd_paddr_low;
+	uint32_t vpd_paddr_high;
+	uint32_t avail_vpd_len;
+	uint32_t rsvd_52_63[12];
+};
+
+struct lpfc_mbx_read_config {
+	uint32_t word1;
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT	31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK	0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD	word1
+	uint32_t word2;
+#define lpfc_mbx_rd_conf_lnk_numb_SHIFT		0
+#define lpfc_mbx_rd_conf_lnk_numb_MASK		0x0000003F
+#define lpfc_mbx_rd_conf_lnk_numb_WORD		word2
+#define lpfc_mbx_rd_conf_lnk_type_SHIFT		6
+#define lpfc_mbx_rd_conf_lnk_type_MASK		0x00000003
+#define lpfc_mbx_rd_conf_lnk_type_WORD		word2
+#define LPFC_LNK_TYPE_GE	0
+#define LPFC_LNK_TYPE_FC	1
+#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT		8
+#define lpfc_mbx_rd_conf_lnk_ldv_MASK		0x00000001
+#define lpfc_mbx_rd_conf_lnk_ldv_WORD		word2
+#define lpfc_mbx_rd_conf_topology_SHIFT		24
+#define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD		word2
+	uint32_t rsvd_3;
+	uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD		word4
+	uint32_t rsvd_5;
+	uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
+#define lpfc_mbx_rd_conf_link_speed_SHIFT	16
+#define lpfc_mbx_rd_conf_link_speed_MASK	0x0000FFFF
+#define lpfc_mbx_rd_conf_link_speed_WORD	word6
+	uint32_t rsvd_7;
+	uint32_t word8;
+#define lpfc_mbx_rd_conf_bbscn_min_SHIFT	0
+#define lpfc_mbx_rd_conf_bbscn_min_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_min_WORD		word8
+#define lpfc_mbx_rd_conf_bbscn_max_SHIFT	4
+#define lpfc_mbx_rd_conf_bbscn_max_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_max_WORD		word8
+#define lpfc_mbx_rd_conf_bbscn_def_SHIFT	8
+#define lpfc_mbx_rd_conf_bbscn_def_MASK		0x0000000F
+#define lpfc_mbx_rd_conf_bbscn_def_WORD		word8
+	uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT		0
+#define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD		word9
+	uint32_t rsvd_10;
+	uint32_t rsvd_11;
+	uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT		0
+#define lpfc_mbx_rd_conf_xri_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD		word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT	16
+#define lpfc_mbx_rd_conf_xri_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD		word12
+	uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_rpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD		word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_rpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD		word13
+	uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_vpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD		word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_vpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD		word14
+	uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT         0
+#define lpfc_mbx_rd_conf_vfi_base_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD          word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT        16
+#define lpfc_mbx_rd_conf_vfi_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD         word15
+	uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK	0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD	word16
+	uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_rq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD		word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_eq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD		word17
+	uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_wq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD		word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_cq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD		word18
+};
+
+struct lpfc_mbx_request_features {
+	uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT		0
+#define lpfc_mbx_rq_ftr_qry_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD		word1
+	uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_iaar_SHIFT		9
+#define lpfc_mbx_rq_ftr_rq_iaar_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaar_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT		11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT		16
+#define lpfc_mbx_rq_ftr_rq_mrqp_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_mrqp_WORD		word2
+	uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rsp_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT		11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT		16
+#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD		word3
+};
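+
+/*
+ * Usage sketch (illustrative addition): word2 carries the requested
+ * feature bits and word3 returns what the port actually granted, so a
+ * caller typically sets rq_* bits before issuing the mailbox and tests the
+ * matching rsp_* bits afterwards; "req_ftrs" is a hypothetical pointer to
+ * this structure:
+ *
+ *	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, req_ftrs, 1);
+ *	...
+ *	if (!bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, req_ftrs))
+ *		... handle the feature not being granted ...
+ */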
+
+struct lpfc_mbx_supp_pages {
+	uint32_t word1;
+#define qs_SHIFT 				0
+#define qs_MASK					0x00000001
+#define qs_WORD					word1
+#define wr_SHIFT				1
+#define wr_MASK 				0x00000001
+#define wr_WORD					word1
+#define pf_SHIFT				8
+#define pf_MASK					0x000000ff
+#define pf_WORD					word1
+#define cpn_SHIFT				16
+#define cpn_MASK				0x000000ff
+#define cpn_WORD				word1
+	uint32_t word2;
+#define list_offset_SHIFT 			0
+#define list_offset_MASK			0x000000ff
+#define list_offset_WORD			word2
+#define next_offset_SHIFT			8
+#define next_offset_MASK			0x000000ff
+#define next_offset_WORD			word2
+#define elem_cnt_SHIFT				16
+#define elem_cnt_MASK				0x000000ff
+#define elem_cnt_WORD				word2
+	uint32_t word3;
+#define pn_0_SHIFT				24
+#define pn_0_MASK  				0x000000ff
+#define pn_0_WORD				word3
+#define pn_1_SHIFT				16
+#define pn_1_MASK				0x000000ff
+#define pn_1_WORD				word3
+#define pn_2_SHIFT				8
+#define pn_2_MASK				0x000000ff
+#define pn_2_WORD				word3
+#define pn_3_SHIFT				0
+#define pn_3_MASK				0x000000ff
+#define pn_3_WORD				word3
+	uint32_t word4;
+#define pn_4_SHIFT				24
+#define pn_4_MASK				0x000000ff
+#define pn_4_WORD				word4
+#define pn_5_SHIFT				16
+#define pn_5_MASK				0x000000ff
+#define pn_5_WORD				word4
+#define pn_6_SHIFT				8
+#define pn_6_MASK				0x000000ff
+#define pn_6_WORD				word4
+#define pn_7_SHIFT				0
+#define pn_7_MASK				0x000000ff
+#define pn_7_WORD				word4
+	uint32_t rsvd[27];
+#define LPFC_SUPP_PAGES			0
+#define LPFC_BLOCK_GUARD_PROFILES	1
+#define LPFC_SLI4_PARAMETERS		2
+};
+
+struct lpfc_mbx_memory_dump_type3 {
+	uint32_t word1;
+#define lpfc_mbx_memory_dump_type3_type_SHIFT    0
+#define lpfc_mbx_memory_dump_type3_type_MASK     0x0000000f
+#define lpfc_mbx_memory_dump_type3_type_WORD     word1
+#define lpfc_mbx_memory_dump_type3_link_SHIFT    24
+#define lpfc_mbx_memory_dump_type3_link_MASK     0x000000ff
+#define lpfc_mbx_memory_dump_type3_link_WORD     word1
+	uint32_t word2;
+#define lpfc_mbx_memory_dump_type3_page_no_SHIFT  0
+#define lpfc_mbx_memory_dump_type3_page_no_MASK   0x0000ffff
+#define lpfc_mbx_memory_dump_type3_page_no_WORD   word2
+#define lpfc_mbx_memory_dump_type3_offset_SHIFT   16
+#define lpfc_mbx_memory_dump_type3_offset_MASK    0x0000ffff
+#define lpfc_mbx_memory_dump_type3_offset_WORD    word2
+	uint32_t word3;
+#define lpfc_mbx_memory_dump_type3_length_SHIFT  0
+#define lpfc_mbx_memory_dump_type3_length_MASK   0x00ffffff
+#define lpfc_mbx_memory_dump_type3_length_WORD   word3
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	uint32_t return_len;
+};
+
+#define DMP_PAGE_A0             0xa0
+#define DMP_PAGE_A2             0xa2
+#define DMP_SFF_PAGE_A0_SIZE	256
+#define DMP_SFF_PAGE_A2_SIZE	256
+
+#define SFP_WAVELENGTH_LC1310	1310
+#define SFP_WAVELENGTH_LL1550	1550
+
+
+/*
+ * SFF-8472 TABLE 3.4
+ */
+#define  SFF_PG0_CONNECTOR_UNKNOWN    0x00   /* Unknown  */
+#define  SFF_PG0_CONNECTOR_SC         0x01   /* SC       */
+#define  SFF_PG0_CONNECTOR_FC_COPPER1 0x02   /* FC style 1 copper connector */
+#define  SFF_PG0_CONNECTOR_FC_COPPER2 0x03   /* FC style 2 copper connector */
+#define  SFF_PG0_CONNECTOR_BNC        0x04   /* BNC / TNC */
+#define  SFF_PG0_CONNECTOR__FC_COAX   0x05   /* FC coaxial headers */
+#define  SFF_PG0_CONNECTOR_FIBERJACK  0x06   /* FiberJack */
+#define  SFF_PG0_CONNECTOR_LC         0x07   /* LC        */
+#define  SFF_PG0_CONNECTOR_MT         0x08   /* MT - RJ   */
+#define  SFF_PG0_CONNECTOR_MU         0x09   /* MU        */
+#define  SFF_PG0_CONNECTOR_SF         0x0A   /* SG        */
+#define  SFF_PG0_CONNECTOR_OPTICAL_PIGTAIL 0x0B /* Optical pigtail */
+#define  SFF_PG0_CONNECTOR_OPTICAL_PARALLEL 0x0C /* MPO Parallel Optic */
+#define  SFF_PG0_CONNECTOR_HSSDC_II   0x20   /* HSSDC II */
+#define  SFF_PG0_CONNECTOR_COPPER_PIGTAIL 0x21 /* Copper pigtail */
+#define  SFF_PG0_CONNECTOR_RJ45       0x22  /* RJ45 */
+
+/* SFF-8472 Table 3.1 Diagnostics: Data Fields Address/Page A0 */
+
+#define SSF_IDENTIFIER			0
+#define SSF_EXT_IDENTIFIER		1
+#define SSF_CONNECTOR			2
+#define SSF_TRANSCEIVER_CODE_B0		3
+#define SSF_TRANSCEIVER_CODE_B1		4
+#define SSF_TRANSCEIVER_CODE_B2		5
+#define SSF_TRANSCEIVER_CODE_B3		6
+#define SSF_TRANSCEIVER_CODE_B4		7
+#define SSF_TRANSCEIVER_CODE_B5		8
+#define SSF_TRANSCEIVER_CODE_B6		9
+#define SSF_TRANSCEIVER_CODE_B7		10
+#define SSF_ENCODING			11
+#define SSF_BR_NOMINAL			12
+#define SSF_RATE_IDENTIFIER		13
+#define SSF_LENGTH_9UM_KM		14
+#define SSF_LENGTH_9UM			15
+#define SSF_LENGTH_50UM_OM2		16
+#define SSF_LENGTH_62UM_OM1		17
+#define SFF_LENGTH_COPPER		18
+#define SSF_LENGTH_50UM_OM3		19
+#define SSF_VENDOR_NAME			20
+#define SSF_VENDOR_OUI			36
+#define SSF_VENDOR_PN			40
+#define SSF_VENDOR_REV			56
+#define SSF_WAVELENGTH_B1		60
+#define SSF_WAVELENGTH_B0		61
+#define SSF_CC_BASE			63
+#define SSF_OPTIONS_B1			64
+#define SSF_OPTIONS_B0			65
+#define SSF_BR_MAX			66
+#define SSF_BR_MIN			67
+#define SSF_VENDOR_SN			68
+#define SSF_DATE_CODE			84
+#define SSF_MONITORING_TYPEDIAGNOSTIC	92
+#define SSF_ENHANCED_OPTIONS		93
+#define SFF_8472_COMPLIANCE		94
+#define SSF_CC_EXT			95
+#define SSF_A0_VENDOR_SPECIFIC		96
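+
+/*
+ * Worked example (illustrative addition): the nominal wavelength is a
+ * 16-bit big-endian value in bytes 60-61 of page A0, so for a longwave SFP
+ *
+ *	wave = (page_a0[SSF_WAVELENGTH_B1] << 8) | page_a0[SSF_WAVELENGTH_B0];
+ *
+ * yields SFP_WAVELENGTH_LC1310 (1310 nm).
+ */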
+
+/* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */
+
+#define SSF_TEMP_HIGH_ALARM		0
+#define SSF_TEMP_LOW_ALARM		2
+#define SSF_TEMP_HIGH_WARNING		4
+#define SSF_TEMP_LOW_WARNING		6
+#define SSF_VOLTAGE_HIGH_ALARM		8
+#define SSF_VOLTAGE_LOW_ALARM		10
+#define SSF_VOLTAGE_HIGH_WARNING	12
+#define SSF_VOLTAGE_LOW_WARNING		14
+#define SSF_BIAS_HIGH_ALARM		16
+#define SSF_BIAS_LOW_ALARM		18
+#define SSF_BIAS_HIGH_WARNING		20
+#define SSF_BIAS_LOW_WARNING		22
+#define SSF_TXPOWER_HIGH_ALARM		24
+#define SSF_TXPOWER_LOW_ALARM		26
+#define SSF_TXPOWER_HIGH_WARNING	28
+#define SSF_TXPOWER_LOW_WARNING		30
+#define SSF_RXPOWER_HIGH_ALARM		32
+#define SSF_RXPOWER_LOW_ALARM		34
+#define SSF_RXPOWER_HIGH_WARNING	36
+#define SSF_RXPOWER_LOW_WARNING		38
+#define SSF_EXT_CAL_CONSTANTS		56
+#define SSF_CC_DMI			95
+#define SFF_TEMPERATURE_B1		96
+#define SFF_TEMPERATURE_B0		97
+#define SFF_VCC_B1			98
+#define SFF_VCC_B0			99
+#define SFF_TX_BIAS_CURRENT_B1		100
+#define SFF_TX_BIAS_CURRENT_B0		101
+#define SFF_TXPOWER_B1			102
+#define SFF_TXPOWER_B0			103
+#define SFF_RXPOWER_B1			104
+#define SFF_RXPOWER_B0			105
+#define SSF_STATUS_CONTROL		110
+#define SSF_ALARM_FLAGS			112
+#define SSF_WARNING_FLAGS		116
+#define SSF_EXT_TATUS_CONTROL_B1	118
+#define SSF_EXT_TATUS_CONTROL_B0	119
+#define SSF_A2_VENDOR_SPECIFIC		120
+#define SSF_USER_EEPROM			128
+#define SSF_VENDOR_CONTROL		148
+
+
+/*
+ * Transceiver codes Fibre Channel SFF-8472
+ * Table 3.5.
+ */
+
+struct sff_trasnceiver_codes_byte0 {
+	uint8_t inifiband:4;
+	uint8_t teng_ethernet:4;
+};
+
+struct sff_trasnceiver_codes_byte1 {
+	uint8_t  sonet:6;
+	uint8_t  escon:2;
+};
+
+struct sff_trasnceiver_codes_byte2 {
+	uint8_t  soNet:8;
+};
+
+struct sff_trasnceiver_codes_byte3 {
+	uint8_t ethernet:8;
+};
+
+struct sff_trasnceiver_codes_byte4 {
+	uint8_t fc_el_lo:1;
+	uint8_t fc_lw_laser:1;
+	uint8_t fc_sw_laser:1;
+	uint8_t fc_md_distance:1;
+	uint8_t fc_lg_distance:1;
+	uint8_t fc_int_distance:1;
+	uint8_t fc_short_distance:1;
+	uint8_t fc_vld_distance:1;
+};
+
+struct sff_trasnceiver_codes_byte5 {
+	uint8_t reserved1:1;
+	uint8_t reserved2:1;
+	uint8_t fc_sfp_active:1;  /* Active cable   */
+	uint8_t fc_sfp_passive:1; /* Passive cable  */
+	uint8_t fc_lw_laser:1;     /* Longwave laser */
+	uint8_t fc_sw_laser_sl:1;
+	uint8_t fc_sw_laser_sn:1;
+	uint8_t fc_el_hi:1;        /* Electrical enclosure high bit */
+};
+
+struct sff_trasnceiver_codes_byte6 {
+	uint8_t fc_tm_sm:1;      /* Single Mode */
+	uint8_t reserved:1;
+	uint8_t fc_tm_m6:1;       /* Multimode, 62.5um (M6) */
+	uint8_t fc_tm_tv:1;      /* Video Coax (TV) */
+	uint8_t fc_tm_mi:1;      /* Miniature Coax (MI) */
+	uint8_t fc_tm_tp:1;      /* Twisted Pair (TP) */
+	uint8_t fc_tm_tw:1;      /* Twin Axial Pair  */
+};
+
+struct sff_trasnceiver_codes_byte7 {
+	uint8_t fc_sp_100MB:1;   /*  100 MB/sec */
+	uint8_t reserve:1;
+	uint8_t fc_sp_200mb:1;   /*  200 MB/sec */
+	uint8_t fc_sp_3200MB:1;  /* 3200 MB/sec */
+	uint8_t fc_sp_400MB:1;   /*  400 MB/sec */
+	uint8_t fc_sp_1600MB:1;  /* 1600 MB/sec */
+	uint8_t fc_sp_800MB:1;   /*  800 MB/sec */
+	uint8_t fc_sp_1200MB:1;  /* 1200 MB/sec */
+};
+
+/* User writable non-volatile memory, SFF-8472 Table 3.20 */
+struct user_eeprom {
+	uint8_t vendor_name[16];
+	uint8_t vendor_oui[3];
+	uint8_t vendor_pn[16];
+	uint8_t vendor_rev[4];
+	uint8_t vendor_sn[16];
+	uint8_t datecode[6];
+	uint8_t lot_code[2];
+	uint8_t reserved191[57];
+};
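+
+/*
+ * Layout note (editorial addition): the user-writable region spans bytes
+ * 128-247, so the field sizes above must total 120 bytes
+ * (16 + 3 + 16 + 4 + 16 + 6 + 2 + 57), which fixes vendor_pn at 16 bytes.
+ */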
+
+struct lpfc_mbx_pc_sli4_params {
+	uint32_t word1;
+#define qs_SHIFT				0
+#define qs_MASK					0x00000001
+#define qs_WORD					word1
+#define wr_SHIFT				1
+#define wr_MASK					0x00000001
+#define wr_WORD					word1
+#define pf_SHIFT				8
+#define pf_MASK					0x000000ff
+#define pf_WORD					word1
+#define cpn_SHIFT				16
+#define cpn_MASK				0x000000ff
+#define cpn_WORD				word1
+	uint32_t word2;
+#define if_type_SHIFT				0
+#define if_type_MASK				0x00000007
+#define if_type_WORD				word2
+#define sli_rev_SHIFT				4
+#define sli_rev_MASK				0x0000000f
+#define sli_rev_WORD				word2
+#define sli_family_SHIFT			8
+#define sli_family_MASK				0x000000ff
+#define sli_family_WORD				word2
+#define featurelevel_1_SHIFT			16
+#define featurelevel_1_MASK			0x000000ff
+#define featurelevel_1_WORD			word2
+#define featurelevel_2_SHIFT			24
+#define featurelevel_2_MASK			0x0000001f
+#define featurelevel_2_WORD			word2
+	uint32_t word3;
+#define fcoe_SHIFT 				0
+#define fcoe_MASK				0x00000001
+#define fcoe_WORD				word3
+#define fc_SHIFT				1
+#define fc_MASK					0x00000001
+#define fc_WORD					word3
+#define nic_SHIFT				2
+#define nic_MASK				0x00000001
+#define nic_WORD				word3
+#define iscsi_SHIFT				3
+#define iscsi_MASK				0x00000001
+#define iscsi_WORD				word3
+#define rdma_SHIFT				4
+#define rdma_MASK				0x00000001
+#define rdma_WORD				word3
+	uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
+	uint32_t word5;
+#define if_page_sz_SHIFT			0
+#define if_page_sz_MASK				0x0000ffff
+#define if_page_sz_WORD				word5
+#define loopbk_scope_SHIFT			24
+#define loopbk_scope_MASK			0x0000000f
+#define loopbk_scope_WORD			word5
+#define rq_db_window_SHIFT			28
+#define rq_db_window_MASK			0x0000000f
+#define rq_db_window_WORD			word5
+	uint32_t word6;
+#define eq_pages_SHIFT				0
+#define eq_pages_MASK				0x0000000f
+#define eq_pages_WORD				word6
+#define eqe_size_SHIFT				8
+#define eqe_size_MASK				0x000000ff
+#define eqe_size_WORD				word6
+	uint32_t word7;
+#define cq_pages_SHIFT				0
+#define cq_pages_MASK				0x0000000f
+#define cq_pages_WORD				word7
+#define cqe_size_SHIFT				8
+#define cqe_size_MASK				0x000000ff
+#define cqe_size_WORD				word7
+	uint32_t word8;
+#define mq_pages_SHIFT				0
+#define mq_pages_MASK				0x0000000f
+#define mq_pages_WORD				word8
+#define mqe_size_SHIFT				8
+#define mqe_size_MASK				0x000000ff
+#define mqe_size_WORD				word8
+#define mq_elem_cnt_SHIFT			16
+#define mq_elem_cnt_MASK			0x000000ff
+#define mq_elem_cnt_WORD			word8
+	uint32_t word9;
+#define wq_pages_SHIFT				0
+#define wq_pages_MASK				0x0000ffff
+#define wq_pages_WORD				word9
+#define wqe_size_SHIFT				8
+#define wqe_size_MASK				0x000000ff
+#define wqe_size_WORD				word9
+	uint32_t word10;
+#define rq_pages_SHIFT				0
+#define rq_pages_MASK				0x0000ffff
+#define rq_pages_WORD				word10
+#define rqe_size_SHIFT				8
+#define rqe_size_MASK				0x000000ff
+#define rqe_size_WORD				word10
+	uint32_t word11;
+#define hdr_pages_SHIFT				0
+#define hdr_pages_MASK				0x0000000f
+#define hdr_pages_WORD				word11
+#define hdr_size_SHIFT				8
+#define hdr_size_MASK				0x0000000f
+#define hdr_size_WORD				word11
+#define hdr_pp_align_SHIFT			16
+#define hdr_pp_align_MASK			0x0000ffff
+#define hdr_pp_align_WORD			word11
+	uint32_t word12;
+#define sgl_pages_SHIFT				0
+#define sgl_pages_MASK				0x0000000f
+#define sgl_pages_WORD				word12
+#define sgl_pp_align_SHIFT			16
+#define sgl_pp_align_MASK			0x0000ffff
+#define sgl_pp_align_WORD			word12
+	uint32_t rsvd_13_63[51];
+};
+#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
+			       &(~((SLI4_PAGE_SIZE)-1)))
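+
+/*
+ * Editorial usage sketch (not part of the upstream header): the
+ * SHIFT/MASK/WORD triplets above are meant for the driver's token-pasting
+ * accessors bf_get()/bf_set(), so a field read looks like:
+ *
+ *	sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+ *
+ * SLI4_PAGE_ALIGN() rounds an address up to the next 4 KB boundary, e.g.
+ * SLI4_PAGE_ALIGN(0x1001) == 0x2000 while SLI4_PAGE_ALIGN(0x1000) == 0x1000.
+ */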
+
+struct lpfc_sli4_parameters {
+	uint32_t word0;
+#define cfg_prot_type_SHIFT			0
+#define cfg_prot_type_MASK			0x000000FF
+#define cfg_prot_type_WORD			word0
+	uint32_t word1;
+#define cfg_ft_SHIFT				0
+#define cfg_ft_MASK				0x00000001
+#define cfg_ft_WORD				word1
+#define cfg_sli_rev_SHIFT			4
+#define cfg_sli_rev_MASK			0x0000000f
+#define cfg_sli_rev_WORD			word1
+#define cfg_sli_family_SHIFT			8
+#define cfg_sli_family_MASK			0x0000000f
+#define cfg_sli_family_WORD			word1
+#define cfg_if_type_SHIFT			12
+#define cfg_if_type_MASK			0x0000000f
+#define cfg_if_type_WORD			word1
+#define cfg_sli_hint_1_SHIFT			16
+#define cfg_sli_hint_1_MASK			0x000000ff
+#define cfg_sli_hint_1_WORD			word1
+#define cfg_sli_hint_2_SHIFT			24
+#define cfg_sli_hint_2_MASK			0x0000001f
+#define cfg_sli_hint_2_WORD			word1
+	uint32_t word2;
+	uint32_t word3;
+	uint32_t word4;
+#define cfg_cqv_SHIFT				14
+#define cfg_cqv_MASK				0x00000003
+#define cfg_cqv_WORD				word4
+	uint32_t word5;
+	uint32_t word6;
+#define cfg_mqv_SHIFT				14
+#define cfg_mqv_MASK				0x00000003
+#define cfg_mqv_WORD				word6
+	uint32_t word7;
+	uint32_t word8;
+#define cfg_wqpcnt_SHIFT			0
+#define cfg_wqpcnt_MASK				0x0000000f
+#define cfg_wqpcnt_WORD				word8
+#define cfg_wqsize_SHIFT			8
+#define cfg_wqsize_MASK				0x0000000f
+#define cfg_wqsize_WORD				word8
+#define cfg_wqv_SHIFT				14
+#define cfg_wqv_MASK				0x00000003
+#define cfg_wqv_WORD				word8
+#define cfg_wqpsize_SHIFT			16
+#define cfg_wqpsize_MASK			0x000000ff
+#define cfg_wqpsize_WORD			word8
+	uint32_t word9;
+	uint32_t word10;
+#define cfg_rqv_SHIFT				14
+#define cfg_rqv_MASK				0x00000003
+#define cfg_rqv_WORD				word10
+	uint32_t word11;
+#define cfg_rq_db_window_SHIFT			28
+#define cfg_rq_db_window_MASK			0x0000000f
+#define cfg_rq_db_window_WORD			word11
+	uint32_t word12;
+#define cfg_fcoe_SHIFT				0
+#define cfg_fcoe_MASK				0x00000001
+#define cfg_fcoe_WORD				word12
+#define cfg_ext_SHIFT				1
+#define cfg_ext_MASK				0x00000001
+#define cfg_ext_WORD				word12
+#define cfg_hdrr_SHIFT				2
+#define cfg_hdrr_MASK				0x00000001
+#define cfg_hdrr_WORD				word12
+#define cfg_phwq_SHIFT				15
+#define cfg_phwq_MASK				0x00000001
+#define cfg_phwq_WORD				word12
+#define cfg_oas_SHIFT				25
+#define cfg_oas_MASK				0x00000001
+#define cfg_oas_WORD				word12
+#define cfg_loopbk_scope_SHIFT			28
+#define cfg_loopbk_scope_MASK			0x0000000f
+#define cfg_loopbk_scope_WORD			word12
+	uint32_t sge_supp_len;
+	uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT			0
+#define cfg_sgl_page_cnt_MASK			0x0000000f
+#define cfg_sgl_page_cnt_WORD			word14
+#define cfg_sgl_page_size_SHIFT			8
+#define cfg_sgl_page_size_MASK			0x000000ff
+#define cfg_sgl_page_size_WORD			word14
+#define cfg_sgl_pp_align_SHIFT			16
+#define cfg_sgl_pp_align_MASK			0x000000ff
+#define cfg_sgl_pp_align_WORD			word14
+	uint32_t word15;
+	uint32_t word16;
+	uint32_t word17;
+	uint32_t word18;
+	uint32_t word19;
+#define cfg_ext_embed_cb_SHIFT			0
+#define cfg_ext_embed_cb_MASK			0x00000001
+#define cfg_ext_embed_cb_WORD			word19
+#define cfg_mds_diags_SHIFT			1
+#define cfg_mds_diags_MASK			0x00000001
+#define cfg_mds_diags_WORD			word19
+#define cfg_nvme_SHIFT				3
+#define cfg_nvme_MASK				0x00000001
+#define cfg_nvme_WORD				word19
+#define cfg_xib_SHIFT				4
+#define cfg_xib_MASK				0x00000001
+#define cfg_xib_WORD				word19
+#define cfg_eqdr_SHIFT				8
+#define cfg_eqdr_MASK				0x00000001
+#define cfg_eqdr_WORD				word19
+#define LPFC_NODELAY_MAX_IO		32
+};
+
+#define LPFC_SET_UE_RECOVERY		0x10
+#define LPFC_SET_MDS_DIAGS		0x11
+struct lpfc_mbx_set_feature {
+	struct mbox_header header;
+	uint32_t feature;
+	uint32_t param_len;
+	uint32_t word6;
+#define lpfc_mbx_set_feature_UER_SHIFT  0
+#define lpfc_mbx_set_feature_UER_MASK   0x00000001
+#define lpfc_mbx_set_feature_UER_WORD   word6
+#define lpfc_mbx_set_feature_mds_SHIFT  0
+#define lpfc_mbx_set_feature_mds_MASK   0x00000001
+#define lpfc_mbx_set_feature_mds_WORD   word6
+#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT  1
+#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK   0x00000001
+#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD   word6
+	uint32_t word7;
+#define lpfc_mbx_set_feature_UERP_SHIFT 0
+#define lpfc_mbx_set_feature_UERP_MASK  0x0000ffff
+#define lpfc_mbx_set_feature_UERP_WORD  word7
+#define lpfc_mbx_set_feature_UESR_SHIFT 16
+#define lpfc_mbx_set_feature_UESR_MASK  0x0000ffff
+#define lpfc_mbx_set_feature_UESR_WORD  word7
+};
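+
+/*
+ * Editorial sketch (assumes the bf_set() accessor): enabling the
+ * unrecoverable-error (UE) recovery feature fills the mailbox roughly as:
+ *
+ *	mbx->feature = LPFC_SET_UE_RECOVERY;
+ *	mbx->param_len = 8;
+ *	bf_set(lpfc_mbx_set_feature_UER, mbx, 1);
+ *
+ * where mbx points at the set_feature member of the MQE union below.
+ */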
+
+#define LPFC_SET_HOST_OS_DRIVER_VERSION    0x2
+struct lpfc_mbx_set_host_data {
+#define LPFC_HOST_OS_DRIVER_VERSION_SIZE   48
+	struct mbox_header header;
+	uint32_t param_id;
+	uint32_t param_len;
+	uint8_t  data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+	struct mbox_header header;
+	struct lpfc_sli4_parameters sli4_parameters;
+};
+
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE			22
+	uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+	uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT		0
+#define lpfc_rsrc_desc_pcie_type_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD		word0
+#define LPFC_RSRC_DESC_TYPE_PCIE		0x40
+#define lpfc_rsrc_desc_pcie_length_SHIFT	8
+#define lpfc_rsrc_desc_pcie_length_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_length_WORD		word0
+	uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT		0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD		word1
+	uint32_t reserved;
+	uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT	0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK	0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD	word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT	8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD		word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT	16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK	0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD	word3
+	uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT	0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK	0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD	word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+	uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK		0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD		word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE		0x43
+#define lpfc_rsrc_desc_fcfcoe_length_SHIFT	8
+#define lpfc_rsrc_desc_fcfcoe_length_MASK	0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_length_WORD	word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD	0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH	72
+#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH	88
+	uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK	0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD	word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK        0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD        word1
+	uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD	word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD	word2
+	uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD	word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD	word3
+	uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD	word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD	word4
+	uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD	word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD	word5
+	uint32_t word6;
+	uint32_t word7;
+	uint32_t word8;
+	uint32_t word9;
+	uint32_t word10;
+	uint32_t word11;
+	uint32_t word12;
+	uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK	0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD	word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT      6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK	0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD	word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT		8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK		0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD		word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT		9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK		0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD		word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD	word13
+/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
+	uint32_t bw_min;
+	uint32_t bw_max;
+	uint32_t iops_min;
+	uint32_t iops_max;
+	uint32_t reserved[4];
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM			2
+	uint32_t rsrc_desc_count;
+	struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+	struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE	0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT		0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE		0x2
+	struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM			2
+	uint32_t rsrc_desc_count;
+	struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+	struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE	0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT		0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE		0x2
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT	0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK	0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD	word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT	8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK	0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD	word10
+		} request;
+		struct {
+			struct lpfc_prof_cfg prof_cfg;
+		} response;
+	} u;
+};
+
+struct lpfc_controller_attribute {
+	uint32_t version_string[8];
+	uint32_t manufacturer_name[8];
+	uint32_t supported_modes;
+	uint32_t word17;
+#define lpfc_cntl_attr_eprom_ver_lo_SHIFT	0
+#define lpfc_cntl_attr_eprom_ver_lo_MASK	0x000000ff
+#define lpfc_cntl_attr_eprom_ver_lo_WORD	word17
+#define lpfc_cntl_attr_eprom_ver_hi_SHIFT	8
+#define lpfc_cntl_attr_eprom_ver_hi_MASK	0x000000ff
+#define lpfc_cntl_attr_eprom_ver_hi_WORD	word17
+	uint32_t mbx_da_struct_ver;
+	uint32_t ep_fw_da_struct_ver;
+	uint32_t ncsi_ver_str[3];
+	uint32_t dflt_ext_timeout;
+	uint32_t model_number[8];
+	uint32_t description[16];
+	uint32_t serial_number[8];
+	uint32_t ip_ver_str[8];
+	uint32_t fw_ver_str[8];
+	uint32_t bios_ver_str[8];
+	uint32_t redboot_ver_str[8];
+	uint32_t driver_ver_str[8];
+	uint32_t flash_fw_ver_str[8];
+	uint32_t functionality;
+	uint32_t word105;
+#define lpfc_cntl_attr_max_cbd_len_SHIFT	0
+#define lpfc_cntl_attr_max_cbd_len_MASK		0x0000ffff
+#define lpfc_cntl_attr_max_cbd_len_WORD		word105
+#define lpfc_cntl_attr_asic_rev_SHIFT		16
+#define lpfc_cntl_attr_asic_rev_MASK		0x000000ff
+#define lpfc_cntl_attr_asic_rev_WORD		word105
+#define lpfc_cntl_attr_gen_guid0_SHIFT		24
+#define lpfc_cntl_attr_gen_guid0_MASK		0x000000ff
+#define lpfc_cntl_attr_gen_guid0_WORD		word105
+	uint32_t gen_guid1_12[3];
+	uint32_t word109;
+#define lpfc_cntl_attr_gen_guid13_14_SHIFT	0
+#define lpfc_cntl_attr_gen_guid13_14_MASK	0x0000ffff
+#define lpfc_cntl_attr_gen_guid13_14_WORD	word109
+#define lpfc_cntl_attr_gen_guid15_SHIFT		16
+#define lpfc_cntl_attr_gen_guid15_MASK		0x000000ff
+#define lpfc_cntl_attr_gen_guid15_WORD		word109
+#define lpfc_cntl_attr_hba_port_cnt_SHIFT	24
+#define lpfc_cntl_attr_hba_port_cnt_MASK	0x000000ff
+#define lpfc_cntl_attr_hba_port_cnt_WORD	word109
+	uint32_t word110;
+#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT	0
+#define lpfc_cntl_attr_dflt_lnk_tmo_MASK	0x0000ffff
+#define lpfc_cntl_attr_dflt_lnk_tmo_WORD	word110
+#define lpfc_cntl_attr_multi_func_dev_SHIFT	24
+#define lpfc_cntl_attr_multi_func_dev_MASK	0x000000ff
+#define lpfc_cntl_attr_multi_func_dev_WORD	word110
+	uint32_t word111;
+#define lpfc_cntl_attr_cache_valid_SHIFT	0
+#define lpfc_cntl_attr_cache_valid_MASK		0x000000ff
+#define lpfc_cntl_attr_cache_valid_WORD		word111
+#define lpfc_cntl_attr_hba_status_SHIFT		8
+#define lpfc_cntl_attr_hba_status_MASK		0x000000ff
+#define lpfc_cntl_attr_hba_status_WORD		word111
+#define lpfc_cntl_attr_max_domain_SHIFT		16
+#define lpfc_cntl_attr_max_domain_MASK		0x000000ff
+#define lpfc_cntl_attr_max_domain_WORD		word111
+#define lpfc_cntl_attr_lnk_numb_SHIFT		24
+#define lpfc_cntl_attr_lnk_numb_MASK		0x0000003f
+#define lpfc_cntl_attr_lnk_numb_WORD		word111
+#define lpfc_cntl_attr_lnk_type_SHIFT		30
+#define lpfc_cntl_attr_lnk_type_MASK		0x00000003
+#define lpfc_cntl_attr_lnk_type_WORD		word111
+	uint32_t fw_post_status;
+	uint32_t hba_mtu[8];
+	uint32_t word121;
+	uint32_t reserved1[3];
+	uint32_t word125;
+#define lpfc_cntl_attr_pci_vendor_id_SHIFT	0
+#define lpfc_cntl_attr_pci_vendor_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_vendor_id_WORD	word125
+#define lpfc_cntl_attr_pci_device_id_SHIFT	16
+#define lpfc_cntl_attr_pci_device_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_device_id_WORD	word125
+	uint32_t word126;
+#define lpfc_cntl_attr_pci_subvdr_id_SHIFT	0
+#define lpfc_cntl_attr_pci_subvdr_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_subvdr_id_WORD	word126
+#define lpfc_cntl_attr_pci_subsys_id_SHIFT	16
+#define lpfc_cntl_attr_pci_subsys_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_subsys_id_WORD	word126
+	uint32_t word127;
+#define lpfc_cntl_attr_pci_bus_num_SHIFT	0
+#define lpfc_cntl_attr_pci_bus_num_MASK		0x000000ff
+#define lpfc_cntl_attr_pci_bus_num_WORD		word127
+#define lpfc_cntl_attr_pci_dev_num_SHIFT	8
+#define lpfc_cntl_attr_pci_dev_num_MASK		0x000000ff
+#define lpfc_cntl_attr_pci_dev_num_WORD		word127
+#define lpfc_cntl_attr_pci_fnc_num_SHIFT	16
+#define lpfc_cntl_attr_pci_fnc_num_MASK		0x000000ff
+#define lpfc_cntl_attr_pci_fnc_num_WORD		word127
+#define lpfc_cntl_attr_inf_type_SHIFT		24
+#define lpfc_cntl_attr_inf_type_MASK		0x000000ff
+#define lpfc_cntl_attr_inf_type_WORD		word127
+	uint32_t unique_id[2];
+	uint32_t word130;
+#define lpfc_cntl_attr_num_netfil_SHIFT		0
+#define lpfc_cntl_attr_num_netfil_MASK		0x000000ff
+#define lpfc_cntl_attr_num_netfil_WORD		word130
+	uint32_t reserved2[4];
+};
+
+struct lpfc_mbx_get_cntl_attributes {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	struct lpfc_controller_attribute cntl_attr;
+};
+
+struct lpfc_mbx_get_port_name {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_port_name_lnk_type_SHIFT	0
+#define lpfc_mbx_get_port_name_lnk_type_MASK	0x00000003
+#define lpfc_mbx_get_port_name_lnk_type_WORD	word4
+		} request;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_port_name_name0_SHIFT	0
+#define lpfc_mbx_get_port_name_name0_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name0_WORD	word4
+#define lpfc_mbx_get_port_name_name1_SHIFT	8
+#define lpfc_mbx_get_port_name_name1_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name1_WORD	word4
+#define lpfc_mbx_get_port_name_name2_SHIFT	16
+#define lpfc_mbx_get_port_name_name2_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name2_WORD	word4
+#define lpfc_mbx_get_port_name_name3_SHIFT	24
+#define lpfc_mbx_get_port_name_name3_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name3_WORD	word4
+#define LPFC_LINK_NUMBER_0			0
+#define LPFC_LINK_NUMBER_1			1
+#define LPFC_LINK_NUMBER_2			2
+#define LPFC_LINK_NUMBER_3			3
+		} response;
+	} u;
+};
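+
+/*
+ * Editorial sketch (assumes bf_get()): the response packs one name byte
+ * per link into word4, selected by the LPFC_LINK_NUMBER_n in use; for
+ * link 0:
+ *
+ *	port_name = bf_get(lpfc_mbx_get_port_name_name0,
+ *			   &mqe->un.get_port_name.u.response);
+ */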
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS			0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER		0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES	0x3
+#define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
+#define MB_CQE_STATUS_DMA_FAILED		0x5
+
+#define LPFC_MBX_WR_CONFIG_MAX_BDE		1
+struct lpfc_mbx_wr_object {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT		31
+#define lpfc_wr_object_eof_MASK			0x00000001
+#define lpfc_wr_object_eof_WORD			word4
+#define lpfc_wr_object_write_length_SHIFT	0
+#define lpfc_wr_object_write_length_MASK	0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD	word4
+			uint32_t write_offset;
+			uint32_t object_name[26];
+			uint32_t bde_count;
+			struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+		} request;
+		struct {
+			uint32_t actual_write_length;
+		} response;
+	} u;
+};
+
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+	uint32_t word0;
+#define lpfc_mqe_status_SHIFT		16
+#define lpfc_mqe_status_MASK		0x0000FFFF
+#define lpfc_mqe_status_WORD		word0
+#define lpfc_mqe_command_SHIFT		8
+#define lpfc_mqe_command_MASK		0x000000FF
+#define lpfc_mqe_command_WORD		word0
+	union {
+		uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+		/* sli4 mailbox commands */
+		struct lpfc_mbx_sli4_config sli4_config;
+		struct lpfc_mbx_init_vfi init_vfi;
+		struct lpfc_mbx_reg_vfi reg_vfi;
+		struct lpfc_mbx_reg_vfi unreg_vfi;
+		struct lpfc_mbx_init_vpi init_vpi;
+		struct lpfc_mbx_resume_rpi resume_rpi;
+		struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+		struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+		struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+		struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
+		struct lpfc_mbx_reg_fcfi reg_fcfi;
+		struct lpfc_mbx_reg_fcfi_mrq reg_fcfi_mrq;
+		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+		struct lpfc_mbx_mq_create mq_create;
+		struct lpfc_mbx_mq_create_ext mq_create_ext;
+		struct lpfc_mbx_eq_create eq_create;
+		struct lpfc_mbx_modify_eq_delay eq_delay;
+		struct lpfc_mbx_cq_create cq_create;
+		struct lpfc_mbx_cq_create_set cq_create_set;
+		struct lpfc_mbx_wq_create wq_create;
+		struct lpfc_mbx_rq_create rq_create;
+		struct lpfc_mbx_rq_create_v2 rq_create_v2;
+		struct lpfc_mbx_mq_destroy mq_destroy;
+		struct lpfc_mbx_eq_destroy eq_destroy;
+		struct lpfc_mbx_cq_destroy cq_destroy;
+		struct lpfc_mbx_wq_destroy wq_destroy;
+		struct lpfc_mbx_rq_destroy rq_destroy;
+		struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+		struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+		struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
+		struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+		struct lpfc_mbx_nembed_cmd nembed_cmd;
+		struct lpfc_mbx_read_rev read_rev;
+		struct lpfc_mbx_read_vpi read_vpi;
+		struct lpfc_mbx_read_config rd_config;
+		struct lpfc_mbx_request_features req_ftrs;
+		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+		struct lpfc_mbx_query_fw_config query_fw_cfg;
+		struct lpfc_mbx_set_beacon_config beacon_config;
+		struct lpfc_mbx_supp_pages supp_pages;
+		struct lpfc_mbx_pc_sli4_params sli4_params;
+		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+		struct lpfc_mbx_set_link_diag_state link_diag_state;
+		struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+		struct lpfc_mbx_run_link_diag_test link_diag_test;
+		struct lpfc_mbx_get_func_cfg get_func_cfg;
+		struct lpfc_mbx_get_prof_cfg get_prof_cfg;
+		struct lpfc_mbx_wr_object wr_object;
+		struct lpfc_mbx_get_port_name get_port_name;
+		struct lpfc_mbx_set_feature  set_feature;
+		struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
+		struct lpfc_mbx_set_host_data set_host_data;
+		struct lpfc_mbx_nop nop;
+	} un;
+};
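+
+/*
+ * Editorial sketch (assumes bf_get()): word0 of the MQE carries the
+ * command opcode on submission and the completion status on return:
+ *
+ *	uint8_t  cmd = bf_get(lpfc_mqe_command, mqe);
+ *	uint16_t sts = bf_get(lpfc_mqe_status, mqe);
+ */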
+
+struct lpfc_mcqe {
+	uint32_t word0;
+#define lpfc_mcqe_status_SHIFT		0
+#define lpfc_mcqe_status_MASK		0x0000FFFF
+#define lpfc_mcqe_status_WORD		word0
+#define lpfc_mcqe_ext_status_SHIFT	16
+#define lpfc_mcqe_ext_status_MASK	0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD	word0
+	uint32_t mcqe_tag0;
+	uint32_t mcqe_tag1;
+	uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT	31
+#define lpfc_trailer_valid_MASK		0x00000001
+#define lpfc_trailer_valid_WORD		trailer
+#define lpfc_trailer_async_SHIFT	30
+#define lpfc_trailer_async_MASK		0x00000001
+#define lpfc_trailer_async_WORD		trailer
+#define lpfc_trailer_hpi_SHIFT		29
+#define lpfc_trailer_hpi_MASK		0x00000001
+#define lpfc_trailer_hpi_WORD		trailer
+#define lpfc_trailer_completed_SHIFT	28
+#define lpfc_trailer_completed_MASK	0x00000001
+#define lpfc_trailer_completed_WORD	trailer
+#define lpfc_trailer_consumed_SHIFT	27
+#define lpfc_trailer_consumed_MASK	0x00000001
+#define lpfc_trailer_consumed_WORD	trailer
+#define lpfc_trailer_type_SHIFT		16
+#define lpfc_trailer_type_MASK		0x000000FF
+#define lpfc_trailer_type_WORD		trailer
+#define lpfc_trailer_code_SHIFT		8
+#define lpfc_trailer_code_MASK		0x000000FF
+#define lpfc_trailer_code_WORD		trailer
+#define LPFC_TRAILER_CODE_LINK	0x1
+#define LPFC_TRAILER_CODE_FCOE	0x2
+#define LPFC_TRAILER_CODE_DCBX	0x3
+#define LPFC_TRAILER_CODE_GRP5	0x5
+#define LPFC_TRAILER_CODE_FC	0x10
+#define LPFC_TRAILER_CODE_SLI	0x11
+};
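+
+/*
+ * Editorial sketch (assumes bf_get()): a mailbox CQE is consumed by
+ * checking the trailer bits, and asynchronous events are dispatched on
+ * the trailer code, e.g.:
+ *
+ *	if (bf_get(lpfc_trailer_valid, mcqe) &&
+ *	    bf_get(lpfc_trailer_async, mcqe) &&
+ *	    bf_get(lpfc_trailer_code, mcqe) == LPFC_TRAILER_CODE_FC)
+ *		handle the FC link attention event;
+ */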
+
+struct lpfc_acqe_link {
+	uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT		24
+#define lpfc_acqe_link_speed_MASK		0x000000FF
+#define lpfc_acqe_link_speed_WORD		word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO		0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS		0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS		0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS		0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS		0x4
+#define LPFC_ASYNC_LINK_SPEED_20GBPS		0x5
+#define LPFC_ASYNC_LINK_SPEED_25GBPS		0x6
+#define LPFC_ASYNC_LINK_SPEED_40GBPS		0x7
+#define LPFC_ASYNC_LINK_SPEED_100GBPS		0x8
+#define lpfc_acqe_link_duplex_SHIFT		16
+#define lpfc_acqe_link_duplex_MASK		0x000000FF
+#define lpfc_acqe_link_duplex_WORD		word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE		0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF		0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL		0x2
+#define lpfc_acqe_link_status_SHIFT		8
+#define lpfc_acqe_link_status_MASK		0x000000FF
+#define lpfc_acqe_link_status_WORD		word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN		0x0
+#define LPFC_ASYNC_LINK_STATUS_UP		0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN	0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP	0x3
+#define lpfc_acqe_link_type_SHIFT		6
+#define lpfc_acqe_link_type_MASK		0x00000003
+#define lpfc_acqe_link_type_WORD		word0
+#define lpfc_acqe_link_number_SHIFT		0
+#define lpfc_acqe_link_number_MASK		0x0000003F
+#define lpfc_acqe_link_number_WORD		word0
+	uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT	0
+#define lpfc_acqe_link_fault_MASK	0x000000FF
+#define lpfc_acqe_link_fault_WORD	word1
+#define LPFC_ASYNC_LINK_FAULT_NONE	0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
+#define lpfc_acqe_logical_link_speed_SHIFT	16
+#define lpfc_acqe_logical_link_speed_MASK	0x0000FFFF
+#define lpfc_acqe_logical_link_speed_WORD	word1
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_LINK_EVENT_TYPE_PHYSICAL	0x0
+#define LPFC_LINK_EVENT_TYPE_VIRTUAL	0x1
+};
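+
+/*
+ * Editorial sketch (assumes bf_get()): decoding a link ACQE:
+ *
+ *	speed  = bf_get(lpfc_acqe_link_speed, acqe);
+ *	status = bf_get(lpfc_acqe_link_status, acqe);
+ *	if (status == LPFC_ASYNC_LINK_STATUS_UP &&
+ *	    speed == LPFC_ASYNC_LINK_SPEED_10GBPS)
+ *		the physical link is up at 10 Gb/s;
+ */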
+
+struct lpfc_acqe_fip {
+	uint32_t index;
+	uint32_t word1;
+#define lpfc_acqe_fip_fcf_count_SHIFT		0
+#define lpfc_acqe_fip_fcf_count_MASK		0x0000FFFF
+#define lpfc_acqe_fip_fcf_count_WORD		word1
+#define lpfc_acqe_fip_event_type_SHIFT		16
+#define lpfc_acqe_fip_event_type_MASK		0x0000FFFF
+#define lpfc_acqe_fip_event_type_WORD		word1
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_FIP_EVENT_TYPE_NEW_FCF		0x1
+#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL	0x2
+#define LPFC_FIP_EVENT_TYPE_FCF_DEAD		0x3
+#define LPFC_FIP_EVENT_TYPE_CVL			0x4
+#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD	0x5
+};
+
+struct lpfc_acqe_dcbx {
+	uint32_t tlv_ttl;
+	uint32_t reserved;
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_grp5 {
+	uint32_t word0;
+#define lpfc_acqe_grp5_type_SHIFT		6
+#define lpfc_acqe_grp5_type_MASK		0x00000003
+#define lpfc_acqe_grp5_type_WORD		word0
+#define lpfc_acqe_grp5_number_SHIFT		0
+#define lpfc_acqe_grp5_number_MASK		0x0000003F
+#define lpfc_acqe_grp5_number_WORD		word0
+	uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT	16
+#define lpfc_acqe_grp5_llink_spd_MASK	0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD	word1
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_fc_la {
+	uint32_t word0;
+#define lpfc_acqe_fc_la_speed_SHIFT		24
+#define lpfc_acqe_fc_la_speed_MASK		0x000000FF
+#define lpfc_acqe_fc_la_speed_WORD		word0
+#define LPFC_FC_LA_SPEED_UNKNOWN		0x0
+#define LPFC_FC_LA_SPEED_1G		0x1
+#define LPFC_FC_LA_SPEED_2G		0x2
+#define LPFC_FC_LA_SPEED_4G		0x4
+#define LPFC_FC_LA_SPEED_8G		0x8
+#define LPFC_FC_LA_SPEED_10G		0xA
+#define LPFC_FC_LA_SPEED_16G		0x10
+#define LPFC_FC_LA_SPEED_32G            0x20
+#define lpfc_acqe_fc_la_topology_SHIFT		16
+#define lpfc_acqe_fc_la_topology_MASK		0x000000FF
+#define lpfc_acqe_fc_la_topology_WORD		word0
+#define LPFC_FC_LA_TOP_UNKOWN		0x0
+#define LPFC_FC_LA_TOP_P2P		0x1
+#define LPFC_FC_LA_TOP_FCAL		0x2
+#define LPFC_FC_LA_TOP_INTERNAL_LOOP	0x3
+#define LPFC_FC_LA_TOP_SERDES_LOOP	0x4
+#define lpfc_acqe_fc_la_att_type_SHIFT		8
+#define lpfc_acqe_fc_la_att_type_MASK		0x000000FF
+#define lpfc_acqe_fc_la_att_type_WORD		word0
+#define LPFC_FC_LA_TYPE_LINK_UP		0x1
+#define LPFC_FC_LA_TYPE_LINK_DOWN	0x2
+#define LPFC_FC_LA_TYPE_NO_HARD_ALPA	0x3
+#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN	0x4
+#define LPFC_FC_LA_TYPE_MDS_LOOPBACK	0x5
+#define LPFC_FC_LA_TYPE_UNEXP_WWPN	0x6
+#define lpfc_acqe_fc_la_port_type_SHIFT		6
+#define lpfc_acqe_fc_la_port_type_MASK		0x00000003
+#define lpfc_acqe_fc_la_port_type_WORD		word0
+#define LPFC_LINK_TYPE_ETHERNET		0x0
+#define LPFC_LINK_TYPE_FC		0x1
+#define lpfc_acqe_fc_la_port_number_SHIFT	0
+#define lpfc_acqe_fc_la_port_number_MASK	0x0000003F
+#define lpfc_acqe_fc_la_port_number_WORD	word0
+	uint32_t word1;
+#define lpfc_acqe_fc_la_llink_spd_SHIFT		16
+#define lpfc_acqe_fc_la_llink_spd_MASK		0x0000FFFF
+#define lpfc_acqe_fc_la_llink_spd_WORD		word1
+#define lpfc_acqe_fc_la_fault_SHIFT		0
+#define lpfc_acqe_fc_la_fault_MASK		0x000000FF
+#define lpfc_acqe_fc_la_fault_WORD		word1
+#define LPFC_FC_LA_FAULT_NONE		0x0
+#define LPFC_FC_LA_FAULT_LOCAL		0x1
+#define LPFC_FC_LA_FAULT_REMOTE		0x2
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_FC_LA_EVENT_TYPE_FC_LINK		0x1
+#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK	0x2
+};
+
+struct lpfc_acqe_misconfigured_event {
+	struct {
+	uint32_t word0;
+#define lpfc_sli_misconfigured_port0_state_SHIFT	0
+#define lpfc_sli_misconfigured_port0_state_MASK		0x000000FF
+#define lpfc_sli_misconfigured_port0_state_WORD		word0
+#define lpfc_sli_misconfigured_port1_state_SHIFT	8
+#define lpfc_sli_misconfigured_port1_state_MASK		0x000000FF
+#define lpfc_sli_misconfigured_port1_state_WORD		word0
+#define lpfc_sli_misconfigured_port2_state_SHIFT	16
+#define lpfc_sli_misconfigured_port2_state_MASK		0x000000FF
+#define lpfc_sli_misconfigured_port2_state_WORD		word0
+#define lpfc_sli_misconfigured_port3_state_SHIFT	24
+#define lpfc_sli_misconfigured_port3_state_MASK		0x000000FF
+#define lpfc_sli_misconfigured_port3_state_WORD		word0
+	uint32_t word1;
+#define lpfc_sli_misconfigured_port0_op_SHIFT		0
+#define lpfc_sli_misconfigured_port0_op_MASK		0x00000001
+#define lpfc_sli_misconfigured_port0_op_WORD		word1
+#define lpfc_sli_misconfigured_port0_severity_SHIFT	1
+#define lpfc_sli_misconfigured_port0_severity_MASK	0x00000003
+#define lpfc_sli_misconfigured_port0_severity_WORD	word1
+#define lpfc_sli_misconfigured_port1_op_SHIFT		8
+#define lpfc_sli_misconfigured_port1_op_MASK		0x00000001
+#define lpfc_sli_misconfigured_port1_op_WORD		word1
+#define lpfc_sli_misconfigured_port1_severity_SHIFT	9
+#define lpfc_sli_misconfigured_port1_severity_MASK	0x00000003
+#define lpfc_sli_misconfigured_port1_severity_WORD	word1
+#define lpfc_sli_misconfigured_port2_op_SHIFT		16
+#define lpfc_sli_misconfigured_port2_op_MASK		0x00000001
+#define lpfc_sli_misconfigured_port2_op_WORD		word1
+#define lpfc_sli_misconfigured_port2_severity_SHIFT	17
+#define lpfc_sli_misconfigured_port2_severity_MASK	0x00000003
+#define lpfc_sli_misconfigured_port2_severity_WORD	word1
+#define lpfc_sli_misconfigured_port3_op_SHIFT		24
+#define lpfc_sli_misconfigured_port3_op_MASK		0x00000001
+#define lpfc_sli_misconfigured_port3_op_WORD		word1
+#define lpfc_sli_misconfigured_port3_severity_SHIFT	25
+#define lpfc_sli_misconfigured_port3_severity_MASK	0x00000003
+#define lpfc_sli_misconfigured_port3_severity_WORD	word1
+	} theEvent;
+#define LPFC_SLI_EVENT_STATUS_VALID			0x00
+#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT	0x01
+#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE	0x02
+#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED	0x03
+#define LPFC_SLI_EVENT_STATUS_UNQUALIFIED	0x04
+#define LPFC_SLI_EVENT_STATUS_UNCERTIFIED	0x05
+};
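+
+/*
+ * Editorial sketch (assumes bf_get()): each port's optic state is one
+ * byte in word0 of theEvent, compared against the status codes above:
+ *
+ *	status = bf_get(lpfc_sli_misconfigured_port0_state, &acqe->theEvent);
+ *	if (status == LPFC_SLI_EVENT_STATUS_NOT_PRESENT)
+ *		no optic is plugged into port 0;
+ */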
+
+struct lpfc_acqe_sli {
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t reserved;
+	uint32_t trailer;
+#define LPFC_SLI_EVENT_TYPE_PORT_ERROR		0x1
+#define LPFC_SLI_EVENT_TYPE_OVER_TEMP		0x2
+#define LPFC_SLI_EVENT_TYPE_NORM_TEMP		0x3
+#define LPFC_SLI_EVENT_TYPE_NVLOG_POST		0x4
+#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP		0x5
+#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED	0x9
+#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT	0xA
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox commands between the host and port. The mailbox consists
+ * of a payload area of 256 bytes and a completion queue of length
+ * 16 bytes.
+ */
+struct lpfc_bmbx_create {
+	struct lpfc_mqe mqe;
+	struct lpfc_mcqe mcqe;
+};
+
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+/* align SGL addr on a size boundary - adjust address up */
+#define NO_XRI  0xffff
+
+struct wqe_common {
+	uint32_t word6;
+#define wqe_xri_tag_SHIFT     0
+#define wqe_xri_tag_MASK      0x0000FFFF
+#define wqe_xri_tag_WORD      word6
+#define wqe_ctxt_tag_SHIFT    16
+#define wqe_ctxt_tag_MASK     0x0000FFFF
+#define wqe_ctxt_tag_WORD     word6
+	uint32_t word7;
+#define wqe_dif_SHIFT         0
+#define wqe_dif_MASK          0x00000003
+#define wqe_dif_WORD          word7
+#define LPFC_WQE_DIF_PASSTHRU	1
+#define LPFC_WQE_DIF_STRIP	2
+#define LPFC_WQE_DIF_INSERT	3
+#define wqe_ct_SHIFT          2
+#define wqe_ct_MASK           0x00000003
+#define wqe_ct_WORD           word7
+#define wqe_status_SHIFT      4
+#define wqe_status_MASK       0x0000000f
+#define wqe_status_WORD       word7
+#define wqe_cmnd_SHIFT        8
+#define wqe_cmnd_MASK         0x000000ff
+#define wqe_cmnd_WORD         word7
+#define wqe_class_SHIFT       16
+#define wqe_class_MASK        0x00000007
+#define wqe_class_WORD        word7
+#define wqe_ar_SHIFT          19
+#define wqe_ar_MASK           0x00000001
+#define wqe_ar_WORD           word7
+#define wqe_ag_SHIFT          wqe_ar_SHIFT
+#define wqe_ag_MASK           wqe_ar_MASK
+#define wqe_ag_WORD           wqe_ar_WORD
+#define wqe_pu_SHIFT          20
+#define wqe_pu_MASK           0x00000003
+#define wqe_pu_WORD           word7
+#define wqe_erp_SHIFT         22
+#define wqe_erp_MASK          0x00000001
+#define wqe_erp_WORD          word7
+#define wqe_conf_SHIFT        wqe_erp_SHIFT
+#define wqe_conf_MASK         wqe_erp_MASK
+#define wqe_conf_WORD         wqe_erp_WORD
+#define wqe_lnk_SHIFT         23
+#define wqe_lnk_MASK          0x00000001
+#define wqe_lnk_WORD          word7
+#define wqe_tmo_SHIFT         24
+#define wqe_tmo_MASK          0x000000ff
+#define wqe_tmo_WORD          word7
+	uint32_t abort_tag; /* word 8 in WQE */
+	uint32_t word9;
+#define wqe_reqtag_SHIFT      0
+#define wqe_reqtag_MASK       0x0000FFFF
+#define wqe_reqtag_WORD       word9
+#define wqe_temp_rpi_SHIFT    16
+#define wqe_temp_rpi_MASK     0x0000FFFF
+#define wqe_temp_rpi_WORD     word9
+#define wqe_rcvoxid_SHIFT     16
+#define wqe_rcvoxid_MASK      0x0000FFFF
+#define wqe_rcvoxid_WORD      word9
+	uint32_t word10;
+#define wqe_ebde_cnt_SHIFT    0
+#define wqe_ebde_cnt_MASK     0x0000000f
+#define wqe_ebde_cnt_WORD     word10
+#define wqe_nvme_SHIFT        4
+#define wqe_nvme_MASK         0x00000001
+#define wqe_nvme_WORD         word10
+#define wqe_oas_SHIFT         6
+#define wqe_oas_MASK          0x00000001
+#define wqe_oas_WORD          word10
+#define wqe_lenloc_SHIFT      7
+#define wqe_lenloc_MASK       0x00000003
+#define wqe_lenloc_WORD       word10
+#define LPFC_WQE_LENLOC_NONE		0
+#define LPFC_WQE_LENLOC_WORD3	1
+#define LPFC_WQE_LENLOC_WORD12	2
+#define LPFC_WQE_LENLOC_WORD4	3
+#define wqe_qosd_SHIFT        9
+#define wqe_qosd_MASK         0x00000001
+#define wqe_qosd_WORD         word10
+#define wqe_xbl_SHIFT         11
+#define wqe_xbl_MASK          0x00000001
+#define wqe_xbl_WORD          word10
+#define wqe_iod_SHIFT         13
+#define wqe_iod_MASK          0x00000001
+#define wqe_iod_WORD          word10
+#define LPFC_WQE_IOD_WRITE	0
+#define LPFC_WQE_IOD_READ	1
+#define wqe_dbde_SHIFT        14
+#define wqe_dbde_MASK         0x00000001
+#define wqe_dbde_WORD         word10
+#define wqe_wqes_SHIFT        15
+#define wqe_wqes_MASK         0x00000001
+#define wqe_wqes_WORD         word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT        1
+#define wqe_wqid_MASK         0x00007fff
+#define wqe_wqid_WORD         word10
+#define wqe_pri_SHIFT         16
+#define wqe_pri_MASK          0x00000007
+#define wqe_pri_WORD          word10
+#define wqe_pv_SHIFT          19
+#define wqe_pv_MASK           0x00000001
+#define wqe_pv_WORD           word10
+#define wqe_xc_SHIFT          21
+#define wqe_xc_MASK           0x00000001
+#define wqe_xc_WORD           word10
+#define wqe_sr_SHIFT          22
+#define wqe_sr_MASK           0x00000001
+#define wqe_sr_WORD           word10
+#define wqe_ccpe_SHIFT        23
+#define wqe_ccpe_MASK         0x00000001
+#define wqe_ccpe_WORD         word10
+#define wqe_ccp_SHIFT         24
+#define wqe_ccp_MASK          0x000000ff
+#define wqe_ccp_WORD          word10
+	uint32_t word11;
+#define wqe_cmd_type_SHIFT    0
+#define wqe_cmd_type_MASK     0x0000000f
+#define wqe_cmd_type_WORD     word11
+#define wqe_els_id_SHIFT      4
+#define wqe_els_id_MASK       0x00000003
+#define wqe_els_id_WORD       word11
+#define LPFC_ELS_ID_FLOGI	3
+#define LPFC_ELS_ID_FDISC	2
+#define LPFC_ELS_ID_LOGO	1
+#define LPFC_ELS_ID_DEFAULT	0
+#define wqe_irsp_SHIFT        4
+#define wqe_irsp_MASK         0x00000001
+#define wqe_irsp_WORD         word11
+#define wqe_sup_SHIFT         6
+#define wqe_sup_MASK          0x00000001
+#define wqe_sup_WORD          word11
+#define wqe_wqec_SHIFT        7
+#define wqe_wqec_MASK         0x00000001
+#define wqe_wqec_WORD         word11
+#define wqe_irsplen_SHIFT     8
+#define wqe_irsplen_MASK      0x0000000f
+#define wqe_irsplen_WORD      word11
+#define wqe_cqid_SHIFT        16
+#define wqe_cqid_MASK         0x0000ffff
+#define wqe_cqid_WORD         word11
+#define LPFC_WQE_CQ_ID_DEFAULT	0xffff
+};
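+
+/*
+ * Editorial sketch (assumes bf_set()): every WQE variant below embeds
+ * this block at words 6-11, so command-independent fields are programmed
+ * uniformly, e.g.:
+ *
+ *	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+ *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iotag);
+ *	bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_ELS_REQUEST64_WQE);
+ *	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ */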
+
+struct wqe_did {
+	uint32_t word5;
+#define wqe_els_did_SHIFT         0
+#define wqe_els_did_MASK          0x00FFFFFF
+#define wqe_els_did_WORD          word5
+#define wqe_xmit_bls_pt_SHIFT         28
+#define wqe_xmit_bls_pt_MASK          0x00000003
+#define wqe_xmit_bls_pt_WORD          word5
+#define wqe_xmit_bls_ar_SHIFT         30
+#define wqe_xmit_bls_ar_MASK          0x00000001
+#define wqe_xmit_bls_ar_WORD          word5
+#define wqe_xmit_bls_xo_SHIFT         31
+#define wqe_xmit_bls_xo_MASK          0x00000001
+#define wqe_xmit_bls_xo_WORD          word5
+};
+
+struct lpfc_wqe_generic {
+	struct ulp_bde64 bde;
+	uint32_t word3;
+	uint32_t word4;
+	uint32_t word5;
+	struct wqe_common wqe_com;
+	uint32_t payload[4];
+};
+
+struct els_request64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t word4;
+#define els_req64_sid_SHIFT         0
+#define els_req64_sid_MASK          0x00FFFFFF
+#define els_req64_sid_WORD          word4
+#define els_req64_sp_SHIFT          24
+#define els_req64_sp_MASK           0x00000001
+#define els_req64_sp_WORD           word4
+#define els_req64_vf_SHIFT          25
+#define els_req64_vf_MASK           0x00000001
+#define els_req64_vf_WORD           word4
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define els_req64_vfid_SHIFT        1
+#define els_req64_vfid_MASK         0x00000FFF
+#define els_req64_vfid_WORD         word12
+#define els_req64_pri_SHIFT         13
+#define els_req64_pri_MASK          0x00000007
+#define els_req64_pri_WORD          word12
+	uint32_t word13;
+#define els_req64_hopcnt_SHIFT      24
+#define els_req64_hopcnt_MASK       0x000000ff
+#define els_req64_hopcnt_WORD       word13
+	uint32_t word14;
+	uint32_t max_response_payload_len;
+};
+
+struct xmit_els_rsp64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t response_payload_len;
+	uint32_t word4;
+#define els_rsp64_sid_SHIFT         0
+#define els_rsp64_sid_MASK          0x00FFFFFF
+#define els_rsp64_sid_WORD          word4
+#define els_rsp64_sp_SHIFT          24
+#define els_rsp64_sp_MASK           0x00000001
+#define els_rsp64_sp_WORD           word4
+	struct wqe_did wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define wqe_rsp_temp_rpi_SHIFT    0
+#define wqe_rsp_temp_rpi_MASK     0x0000FFFF
+#define wqe_rsp_temp_rpi_WORD     word12
+	uint32_t rsvd_13_15[3];
+};
+
+struct xmit_bls_rsp64_wqe {
+	uint32_t payload0;
+/* Payload0 for BA_ACC */
+#define xmit_bls_rsp64_acc_seq_id_SHIFT        16
+#define xmit_bls_rsp64_acc_seq_id_MASK         0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_WORD         payload0
+#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT   24
+#define xmit_bls_rsp64_acc_seq_id_vald_MASK    0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_vald_WORD    payload0
+/* Payload0 for BA_RJT */
+#define xmit_bls_rsp64_rjt_vspec_SHIFT   0
+#define xmit_bls_rsp64_rjt_vspec_MASK    0x000000ff
+#define xmit_bls_rsp64_rjt_vspec_WORD    payload0
+#define xmit_bls_rsp64_rjt_expc_SHIFT    8
+#define xmit_bls_rsp64_rjt_expc_MASK     0x000000ff
+#define xmit_bls_rsp64_rjt_expc_WORD     payload0
+#define xmit_bls_rsp64_rjt_rsnc_SHIFT    16
+#define xmit_bls_rsp64_rjt_rsnc_MASK     0x000000ff
+#define xmit_bls_rsp64_rjt_rsnc_WORD     payload0
+	uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT  0
+#define xmit_bls_rsp64_rxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD   word1
+#define xmit_bls_rsp64_oxid_SHIFT  16
+#define xmit_bls_rsp64_oxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD   word1
+	uint32_t word2;
+#define xmit_bls_rsp64_seqcnthi_SHIFT  0
+#define xmit_bls_rsp64_seqcnthi_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD   word2
+#define xmit_bls_rsp64_seqcntlo_SHIFT  16
+#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD   word2
+	uint32_t rsrvd3;
+	uint32_t rsrvd4;
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define xmit_bls_rsp64_temprpi_SHIFT  0
+#define xmit_bls_rsp64_temprpi_MASK   0x0000ffff
+#define xmit_bls_rsp64_temprpi_WORD   word12
+	uint32_t rsvd_13_15[3];
+};
+
+struct wqe_rctl_dfctl {
+	uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK  0x000000001
+#define wqe_si_WORD  word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK  0x000000001
+#define wqe_la_WORD  word5
+#define wqe_xo_SHIFT	6
+#define wqe_xo_MASK	0x000000001
+#define wqe_xo_WORD	word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK  0x000000001
+#define wqe_ls_WORD  word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK  0x0000000ff
+#define wqe_dfctl_WORD  word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK  0x0000000ff
+#define wqe_type_WORD  word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK  0x0000000ff
+#define wqe_rctl_WORD  word5
+};
+
+struct xmit_seq64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t rsvd3;
+	uint32_t relative_offset;
+	struct wqe_rctl_dfctl wge_ctl;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t xmit_len;
+	uint32_t rsvd_12_15[3];
+};
+struct xmit_bcast64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t seq_payload_len;
+	uint32_t rsvd4;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t request_payload_len;
+	uint32_t relative_offset;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_14[3];
+	uint32_t max_response_payload_len;
+};
+
+/* Define NVME PRLI request to fabric. NVME is a
+ * fabric-only protocol.
+ * Updated to red-lined v1.08 on Sept 16, 2016
+ */
+struct lpfc_nvme_prli {
+	uint32_t word1;
+	/* The Response Code is defined with the FCP PRLI in lpfc_hw.h */
+#define prli_acc_rsp_code_SHIFT         8
+#define prli_acc_rsp_code_MASK          0x0000000f
+#define prli_acc_rsp_code_WORD          word1
+#define prli_estabImagePair_SHIFT       13
+#define prli_estabImagePair_MASK        0x00000001
+#define prli_estabImagePair_WORD        word1
+#define prli_type_code_ext_SHIFT        16
+#define prli_type_code_ext_MASK         0x000000ff
+#define prli_type_code_ext_WORD         word1
+#define prli_type_code_SHIFT            24
+#define prli_type_code_MASK             0x000000ff
+#define prli_type_code_WORD             word1
+	uint32_t word_rsvd2;
+	uint32_t word_rsvd3;
+	uint32_t word4;
+#define prli_fba_SHIFT                  0
+#define prli_fba_MASK                   0x00000001
+#define prli_fba_WORD                   word4
+#define prli_disc_SHIFT                 3
+#define prli_disc_MASK                  0x00000001
+#define prli_disc_WORD                  word4
+#define prli_tgt_SHIFT                  4
+#define prli_tgt_MASK                   0x00000001
+#define prli_tgt_WORD                   word4
+#define prli_init_SHIFT                 5
+#define prli_init_MASK                  0x00000001
+#define prli_init_WORD                  word4
+#define prli_recov_SHIFT                8
+#define prli_recov_MASK                 0x00000001
+#define prli_recov_WORD                 word4
+	uint32_t word5;
+#define prli_fb_sz_SHIFT                0
+#define prli_fb_sz_MASK                 0x0000ffff
+#define prli_fb_sz_WORD                 word5
+#define LPFC_NVMET_FB_SZ_MAX  65536   /* Driver target mode only. */
+};
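+
+/*
+ * Editorial sketch (assumes bf_set() and the PRLI_NVME_TYPE FC-4 type
+ * code, 0x28, from lpfc_hw.h): an NVME initiator advertises itself in
+ * the PRLI payload roughly as:
+ *
+ *	bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
+ *	bf_set(prli_estabImagePair, npr_nvme, 0);
+ *	bf_set(prli_init, npr_nvme, 1);
+ */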
+
+struct create_xri_wqe {
+	uint32_t rsrvd[5];           /* words 0-4 */
+	struct wqe_did	wqe_dest;  /* word 5 */
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+	uint32_t rsrvd[3];
+	uint32_t word3;
+#define	abort_cmd_ia_SHIFT  0
+#define	abort_cmd_ia_MASK  0x000000001
+#define	abort_cmd_ia_WORD  word3
+#define	abort_cmd_criteria_SHIFT  8
+#define	abort_cmd_criteria_MASK  0x0000000ff
+#define	abort_cmd_criteria_WORD  word3
+	uint32_t rsrvd4;
+	uint32_t rsrvd5;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t word3;
+#define	cmd_buff_len_SHIFT  16
+#define	cmd_buff_len_MASK  0x00000ffff
+#define	cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
+	uint32_t total_xfer_len;
+	uint32_t initial_xfer_len;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsrvd12;
+	struct ulp_bde64 ph_bde;       /* words 13-15 */
+};
+
+struct fcp_iread64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t word3;
+#define	cmd_buff_len_SHIFT  16
+#define	cmd_buff_len_MASK  0x00000ffff
+#define	cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
+	uint32_t total_xfer_len;       /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsrvd12;
+	struct ulp_bde64 ph_bde;       /* words 13-15 */
+};
+
+struct fcp_icmnd64_wqe {
+	struct ulp_bde64 bde;          /* words 0-2 */
+	uint32_t word3;
+#define	cmd_buff_len_SHIFT  16
+#define	cmd_buff_len_MASK  0x00000ffff
+#define	cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
+	uint32_t rsrvd4;               /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];        /* word 12-15 */
+};
+
+struct fcp_trsp64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t response_len;
+	uint32_t rsvd_4_5[2];
+	struct wqe_common wqe_com;      /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_tsend64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_offset_len;
+	uint32_t relative_offset;
+	uint32_t reserved;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t fcp_data_len;         /* word 12 */
+	uint32_t rsvd_13_15[3];        /* word 13-15 */
+};
+
+struct fcp_treceive64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_offset_len;
+	uint32_t relative_offset;
+	uint32_t reserved;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t fcp_data_len;         /* word 12 */
+	uint32_t rsvd_13_15[3];        /* word 13-15 */
+};
+#define TXRDY_PAYLOAD_LEN      12
+
+#define CMD_SEND_FRAME	0xE1
+
+struct send_frame_wqe {
+	struct ulp_bde64 bde;          /* words 0-2 */
+	uint32_t frame_len;            /* word 3 */
+	uint32_t fc_hdr_wd0;           /* word 4 */
+	uint32_t fc_hdr_wd1;           /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t fc_hdr_wd2;           /* word 12 */
+	uint32_t fc_hdr_wd3;           /* word 13 */
+	uint32_t fc_hdr_wd4;           /* word 14 */
+	uint32_t fc_hdr_wd5;           /* word 15 */
+};
+
+union lpfc_wqe {
+	uint32_t words[16];
+	struct lpfc_wqe_generic generic;
+	struct fcp_icmnd64_wqe fcp_icmd;
+	struct fcp_iread64_wqe fcp_iread;
+	struct fcp_iwrite64_wqe fcp_iwrite;
+	struct abort_cmd_wqe abort_cmd;
+	struct create_xri_wqe create_xri;
+	struct xmit_bcast64_wqe xmit_bcast64;
+	struct xmit_seq64_wqe xmit_sequence;
+	struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+	struct xmit_els_rsp64_wqe xmit_els_rsp;
+	struct els_request64_wqe els_req;
+	struct gen_req64_wqe gen_req;
+	struct fcp_trsp64_wqe fcp_trsp;
+	struct fcp_tsend64_wqe fcp_tsend;
+	struct fcp_treceive64_wqe fcp_treceive;
+	struct send_frame_wqe send_frame;
+};
+
+union lpfc_wqe128 {
+	uint32_t words[32];
+	struct lpfc_wqe_generic generic;
+	struct fcp_icmnd64_wqe fcp_icmd;
+	struct fcp_iread64_wqe fcp_iread;
+	struct fcp_iwrite64_wqe fcp_iwrite;
+	struct fcp_trsp64_wqe fcp_trsp;
+	struct fcp_tsend64_wqe fcp_tsend;
+	struct fcp_treceive64_wqe fcp_treceive;
+	struct xmit_seq64_wqe xmit_sequence;
+	struct gen_req64_wqe gen_req;
+};
+
+#define LPFC_GROUP_OJECT_MAGIC_G5		0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G6		0xfeaa0003
+#define LPFC_FILE_TYPE_GROUP			0xf7
+#define LPFC_FILE_ID_GROUP			0xa2
+struct lpfc_grp_hdr {
+	uint32_t size;
+	uint32_t magic_number;
+	uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT	24
+#define lpfc_grp_hdr_file_type_MASK	0x000000FF
+#define lpfc_grp_hdr_file_type_WORD	word2
+#define lpfc_grp_hdr_id_SHIFT		16
+#define lpfc_grp_hdr_id_MASK		0x000000FF
+#define lpfc_grp_hdr_id_WORD		word2
+	uint8_t rev_name[128];
+	uint8_t date[12];
+	uint8_t revision[32];
+};
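+
+/*
+ * Editorial sketch: firmware images are sanity-checked against this
+ * header before flashing; a simplified form of the check (assuming the
+ * be32 helpers and the bf_get_be32() accessor) is:
+ *
+ *	magic = be32_to_cpu(image->magic_number);
+ *	if ((magic != LPFC_GROUP_OJECT_MAGIC_G5 &&
+ *	     magic != LPFC_GROUP_OJECT_MAGIC_G6) ||
+ *	    bf_get_be32(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP)
+ *		reject the image;
+ */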
+
+/* Defines for WQE command type */
+#define FCP_COMMAND		0x0
+#define NVME_READ_CMD		0x0
+#define FCP_COMMAND_DATA_OUT	0x1
+#define NVME_WRITE_CMD		0x1
+#define FCP_COMMAND_TRECEIVE	0x2
+#define FCP_COMMAND_TRSP	0x3
+#define FCP_COMMAND_TSEND	0x7
+#define OTHER_COMMAND		0x8
+#define ELS_COMMAND_NON_FIP	0xC
+#define ELS_COMMAND_FIP		0xD
+
+#define LPFC_NVME_EMBED_CMD	0x0
+#define LPFC_NVME_EMBED_WRITE	0x1
+#define LPFC_NVME_EMBED_READ	0x2
+
+/* WQE Commands */
+#define CMD_ABORT_XRI_WQE       0x0F
+#define CMD_XMIT_SEQUENCE64_WQE 0x82
+#define CMD_XMIT_BCAST64_WQE    0x84
+#define CMD_ELS_REQUEST64_WQE   0x8A
+#define CMD_XMIT_ELS_RSP64_WQE  0x95
+#define CMD_XMIT_BLS_RSP64_WQE  0x97
+#define CMD_FCP_IWRITE64_WQE    0x98
+#define CMD_FCP_IREAD64_WQE     0x9A
+#define CMD_FCP_ICMND64_WQE     0x9C
+#define CMD_FCP_TSEND64_WQE     0x9F
+#define CMD_FCP_TRECEIVE64_WQE  0xA1
+#define CMD_FCP_TRSP64_WQE      0xA3
+#define CMD_GEN_REQUEST64_WQE   0xC2
+
+#define CMD_WQE_MASK            0xff
+
+#define LPFC_FW_DUMP	1
+#define LPFC_FW_RESET	2
+#define LPFC_DV_RESET	3
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_ids.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_ids.h
new file mode 100644
index 0000000..0ba3733
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_ids.h
@@ -0,0 +1,124 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/pci.h>
+
+const struct pci_device_id lpfc_id_table[] = {
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0 }
+};
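+
+/*
+ * Editorial note: lpfc_init.c is expected to register this table with
+ * MODULE_DEVICE_TABLE(pci, lpfc_id_table) so the module loader can bind
+ * the driver automatically when a matching HBA is enumerated.
+ */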
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_init.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_init.c
new file mode 100644
index 0000000..9fc5507
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_init.c
@@ -0,0 +1,12348 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/miscdevice.h>
+#include <linux/percpu.h>
+#include <linux/msi.h>
+#include <linux/bitops.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+#include "lpfc_ids.h"
+
+char *_dump_buf_data;
+unsigned long _dump_buf_data_order;
+char *_dump_buf_dif;
+unsigned long _dump_buf_dif_order;
+spinlock_t _dump_buf_lock;
+
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t *lpfc_used_cpu;
+uint32_t lpfc_present_cpu;
+
+static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+static int lpfc_post_rcv_buf(struct lpfc_hba *);
+static int lpfc_sli4_queue_verify(struct lpfc_hba *);
+static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
+static int lpfc_setup_endian_order(struct lpfc_hba *);
+static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
+static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
+static void lpfc_init_sgl_list(struct lpfc_hba *);
+static int lpfc_init_active_sgl_array(struct lpfc_hba *);
+static void lpfc_free_active_sgl(struct lpfc_hba *);
+static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
+static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
+static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
+static void lpfc_sli4_disable_intr(struct lpfc_hba *);
+static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
+static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+
+static struct scsi_transport_template *lpfc_transport_template = NULL;
+static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
+static DEFINE_IDR(lpfc_hba_index);
+#define LPFC_NVMET_BUF_POST 254
+
+/**
+ * lpfc_config_port_prep - Perform lpfc initialization prior to config port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
+ * mailbox command. It retrieves the revision information from the HBA and
+ * collects the Vital Product Data (VPD) about the HBA for preparing the
+ * configuration of the HBA.
+ *
+ * Return codes:
+ *   0 - success.
+ *   -ERESTART - requests the SLI layer to reset the HBA and try again.
+ *   Any other value - indicates an error.
+ **/
+int
+lpfc_config_port_prep(struct lpfc_hba *phba)
+{
+	lpfc_vpd_t *vp = &phba->vpd;
+	int i = 0, rc;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	char *lpfc_vpd_data = NULL;
+	uint16_t offset = 0;
+	static char licensed[56] =
+		    "key unlock for use with gnu public licensed code only\0";
+	static int init_key = 1;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	mb = &pmb->u.mb;
+	phba->link_state = LPFC_INIT_MBX_CMDS;
+
+	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+		if (init_key) {
+			uint32_t *ptext = (uint32_t *) licensed;
+
+			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+				*ptext = cpu_to_be32(*ptext);
+			init_key = 0;
+		}
+
+		lpfc_read_nv(phba, pmb);
+		memset((char *)mb->un.varRDnvp.rsvd3, 0,
+			sizeof (mb->un.varRDnvp.rsvd3));
+		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
+			 sizeof (licensed));
+
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+					"0324 Config Port initialization "
+					"error, mbxCmd x%x READ_NVPARM, "
+					"mbxStatus x%x\n",
+					mb->mbxCommand, mb->mbxStatus);
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -ERESTART;
+		}
+		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
+		       sizeof(phba->wwnn));
+		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
+		       sizeof(phba->wwpn));
+	}
+
+	phba->sli3_options = 0x0;
+
+	/* Setup and issue mailbox READ REV command */
+	lpfc_read_rev(phba, pmb);
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0439 Adapter failed to init, mbxCmd x%x "
+				"READ_REV, mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ERESTART;
+	}
+
+
+	/*
+	 * The value of rr must be 1 since the driver set the cv field to 1.
+	 * This setting requires the FW to set all revision fields.
+	 */
+	if (mb->un.varRdRev.rr == 0) {
+		vp->rev.rBit = 0;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0440 Adapter failed to init, READ_REV has "
+				"missing revision information.\n");
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ERESTART;
+	}
+
+	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EINVAL;
+	}
+
+	/* Save information as VPD data */
+	vp->rev.rBit = 1;
+	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
+	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
+	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
+	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
+	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
+	vp->rev.biuRev = mb->un.varRdRev.biuRev;
+	vp->rev.smRev = mb->un.varRdRev.smRev;
+	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
+	vp->rev.endecRev = mb->un.varRdRev.endecRev;
+	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
+	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
+	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
+	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
+	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
+	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+
+	/* If the sli feature level is less than 9, we must
+	 * tear down all RPIs and VPIs on link down if NPIV
+	 * is enabled.
+	 */
+	if (vp->rev.feaLevelHigh < 9)
+		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
+
+	if (lpfc_is_LC_HBA(phba->pcidev->device))
+		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
+						sizeof (phba->RandomData));
+
+	/* Get adapter VPD information */
+	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
+	if (!lpfc_vpd_data)
+		goto out_free_mbox;
+	do {
+		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0441 VPD not present on adapter, "
+					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
+					mb->mbxCommand, mb->mbxStatus);
+			mb->un.varDmp.word_cnt = 0;
+		}
+		/* dump mem may return a zero when finished or we got a
+		 * mailbox error, either way we are done.
+		 */
+		if (mb->un.varDmp.word_cnt == 0)
+			break;
+		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
+			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
+		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				      lpfc_vpd_data + offset,
+				      mb->un.varDmp.word_cnt);
+		offset += mb->un.varDmp.word_cnt;
+	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
+
+	kfree(lpfc_vpd_data);
+out_free_mbox:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return 0;
+}
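+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * The VPD loop above pulls the DMP_REGION_VPD region one mailbox-sized
+ * chunk at a time, stopping when the adapter reports zero words and
+ * clamping the final chunk so it never overruns the DMP_VPD_SIZE
+ * buffer. A minimal standalone model of that accumulate-and-clamp
+ * pattern; device_dump(), VPD_BUF_SIZE and STAGING_SIZE are invented
+ * stand-ins for the DUMP_MEMORY mailbox round trip:
+ */
+#if 0
+#include <stddef.h>
+#include <string.h>
+
+#define VPD_BUF_SIZE	1024	/* stand-in for DMP_VPD_SIZE */
+#define STAGING_SIZE	128	/* stand-in for the mailbox response area */
+
+/* Hypothetical device dump: fills at most STAGING_SIZE bytes of
+ * 'staging' and returns the byte count, 0 once the region is drained. */
+extern size_t device_dump(size_t offset, unsigned char *staging);
+
+static size_t collect_vpd(unsigned char *buf)
+{
+	unsigned char staging[STAGING_SIZE];
+	size_t offset = 0, got;
+
+	do {
+		got = device_dump(offset, staging);
+		if (got == 0)
+			break;				/* region drained */
+		if (got > VPD_BUF_SIZE - offset)
+			got = VPD_BUF_SIZE - offset;	/* clamp last chunk */
+		memcpy(buf + offset, staging, got);
+		offset += got;
+	} while (offset < VPD_BUF_SIZE);
+
+	return offset;	/* total bytes handed to the VPD parser */
+}
+#endif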
+
+/**
+ * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for driver's configuring asynchronous event
+ * mailbox command to the device. If the mailbox command returns successfully,
+ * it will set internal async event support flag to 1; otherwise, it will
+ * set internal async event support flag to 0.
+ **/
+static void
+lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
+		phba->temp_sensor_support = 1;
+	else
+		phba->temp_sensor_support = 0;
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the dump mailbox command for getting
+ * wake up parameters. When this command completes, the response contains the
+ * Option ROM version of the HBA. This function translates the version number
+ * into a human readable string and stores it in OptionROMVersion.
+ **/
+static void
+lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct prog_id *prg;
+	uint32_t prog_id_word;
+	char dist = ' ';
+	/* character array used for decoding dist type. */
+	char dist_char[] = "nabx";
+
+	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	prg = (struct prog_id *) &prog_id_word;
+
+	/* word 7 contains the option rom version */
+	prog_id_word = pmboxq->u.mb.un.varWords[7];
+
+	/* Decode the Option rom version word to a readable string */
+	if (prg->dist < 4)
+		dist = dist_char[prg->dist];
+
+	if ((prg->dist == 3) && (prg->num == 0))
+		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
+			prg->ver, prg->rev, prg->lev);
+	else
+		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
+			prg->ver, prg->rev, prg->lev,
+			dist, prg->num);
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return;
+}
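+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * lpfc_dump_wakeup_param_cmpl() above reinterprets one 32-bit response
+ * word through struct prog_id and renders it as
+ * "<ver>.<rev><lev>[<dist char><num>]". The bit layout used below is
+ * invented for illustration; the real layout is struct prog_id in the
+ * driver headers.
+ */
+#if 0
+#include <stdio.h>
+#include <stdint.h>
+
+static void decode_prog_id(uint32_t word, char *out, size_t len)
+{
+	/* Hypothetical packing: ver[31:28] rev[27:20] lev[19:16]
+	 * dist[15:14] num[13:0]. */
+	unsigned int ver  = (word >> 28) & 0xf;
+	unsigned int rev  = (word >> 20) & 0xff;
+	unsigned int lev  = (word >> 16) & 0xf;
+	unsigned int dist = (word >> 14) & 0x3;
+	unsigned int num  = word & 0x3fff;
+	static const char dist_char[] = "nabx";
+
+	if (dist == 3 && num == 0)	/* released build: no suffix */
+		snprintf(out, len, "%u.%u%u", ver, rev, lev);
+	else
+		snprintf(out, len, "%u.%u%u%c%u",
+			 ver, rev, lev, dist_char[dist], num);
+}
+#endif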
+
+/**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ *	cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ *
+ * Return codes
+ *   None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
+	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
+
+	/* If the soft name exists then update it using the service params */
+	if (vport->phba->cfg_soft_wwnn)
+		u64_to_wwn(vport->phba->cfg_soft_wwnn,
+			   vport->fc_sparam.nodeName.u.wwn);
+	if (vport->phba->cfg_soft_wwpn)
+		u64_to_wwn(vport->phba->cfg_soft_wwpn,
+			   vport->fc_sparam.portName.u.wwn);
+
+	/*
+	 * If the name is empty or there exists a soft name
+	 * then copy the service params name, otherwise use the fc name
+	 */
+	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+			sizeof(struct lpfc_name));
+
+	/*
+	 * If the port name has changed, then set the Param changes flag
+	 * to unreg the login
+	 */
+	if (vport->fc_portname.u.wwn[0] != 0 &&
+		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
+			sizeof(struct lpfc_name)))
+		vport->vport_flag |= FAWWPN_PARAM_CHG;
+
+	if (vport->fc_portname.u.wwn[0] == 0 ||
+	    vport->phba->cfg_soft_wwpn ||
+	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
+	    vport->vport_flag & FAWWPN_SET) {
+		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+			sizeof(struct lpfc_name));
+		vport->vport_flag &= ~FAWWPN_SET;
+		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
+			vport->vport_flag |= FAWWPN_SET;
+	}
+	else
+		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+			sizeof(struct lpfc_name));
+}
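+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * The routine above moves World Wide Names between the 8-byte
+ * big-endian array inside struct lpfc_name and the u64 soft_wwnn /
+ * soft_wwpn module parameters using the scsi_transport_fc helpers
+ * u64_to_wwn() and wwn_to_u64(). Their effect is a plain big-endian
+ * pack/unpack, roughly:
+ */
+#if 0
+#include <stdint.h>
+
+static void u64_to_wwn_sketch(uint64_t inm, uint8_t wwn[8])
+{
+	int i;
+
+	for (i = 0; i < 8; i++)
+		wwn[i] = (uint8_t)(inm >> (56 - 8 * i));  /* MSB first */
+}
+
+static uint64_t wwn_to_u64_sketch(const uint8_t wwn[8])
+{
+	uint64_t v = 0;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		v = (v << 8) | wwn[i];
+	return v;
+}
+#endif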
+
+/**
+ * lpfc_config_port_post - Perform lpfc initialization after config port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization after the CONFIG_PORT mailbox
+ * command call. It performs all internal resource and state setups on the
+ * port: post IOCB buffers, enable appropriate host interrupt attentions,
+ * ELS ring timers, etc.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_config_port_post(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t status, timeout;
+	int i, j;
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/*
+	 * If the Config port completed correctly the HBA is not
+	 * overheated any more.
+	 */
+	if (phba->over_temp_state == HBA_OVER_TEMP)
+		phba->over_temp_state = HBA_NORMAL_TEMP;
+	spin_unlock_irq(&phba->hbalock);
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+
+	/* Get login parameters for NID.  */
+	rc = lpfc_read_sparam(phba, pmb, 0);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
+	pmb->vport = vport;
+	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0448 Adapter failed init, mbxCmd x%x "
+				"READ_SPARM mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
+		phba->link_state = LPFC_HBA_ERROR;
+		mp = (struct lpfc_dmabuf *) pmb->context1;
+		mempool_free(pmb, phba->mbox_mem_pool);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		return -EIO;
+	}
+
+	mp = (struct lpfc_dmabuf *) pmb->context1;
+
+	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	pmb->context1 = NULL;
+	lpfc_update_vport_wwn(vport);
+
+	/* Update the fc_host data structures with new wwn. */
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+	fc_host_max_npiv_vports(shost) = phba->max_vpi;
+
+	/* If no serial number in VPD data, use low 6 bytes of WWNN */
+	/* This should be consolidated into parse_vpd ? - mr */
+	if (phba->SerialNumber[0] == 0) {
+		uint8_t *outptr;
+
+		outptr = &vport->fc_nodename.u.s.IEEE[0];
+		for (i = 0; i < 12; i++) {
+			status = *outptr++;
+			j = ((status & 0xf0) >> 4);
+			if (j <= 9)
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x30 + (uint8_t) j);
+			else
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+			i++;
+			j = (status & 0xf);
+			if (j <= 9)
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x30 + (uint8_t) j);
+			else
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+		}
+	}
+
+	lpfc_read_config(phba, pmb);
+	pmb->vport = vport;
+	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0453 Adapter failed to init, mbxCmd x%x "
+				"READ_CONFIG, mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
+		phba->link_state = LPFC_HBA_ERROR;
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	/* Check if the port is disabled */
+	lpfc_sli_read_link_ste(phba);
+
+	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
+	i = (mb->un.varRdConfig.max_xri + 1);
+	if (phba->cfg_hba_queue_depth > i) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3359 HBA queue depth changed from %d to %d\n",
+				phba->cfg_hba_queue_depth, i);
+		phba->cfg_hba_queue_depth = i;
+	}
+
+	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
+	i = (mb->un.varRdConfig.max_xri >> 3);
+	if (phba->pport->cfg_lun_queue_depth > i) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3360 LUN queue depth changed from %d to %d\n",
+				phba->pport->cfg_lun_queue_depth, i);
+		phba->pport->cfg_lun_queue_depth = i;
+	}
+
+	phba->lmt = mb->un.varRdConfig.lmt;
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	phba->link_state = LPFC_LINK_DOWN;
+
+	/* Only process IOCBs on ELS ring till hba_state is READY */
+	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
+		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
+	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
+		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Post receive buffers for desired rings */
+	if (phba->sli_rev != 3)
+		lpfc_post_rcv_buf(phba);
+
+	/*
+	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
+	 */
+	if (phba->intr_type == MSIX) {
+		rc = lpfc_config_msi(phba, pmb);
+		if (rc) {
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+					"0352 Config MSI mailbox command "
+					"failed, mbxCmd x%x, mbxStatus x%x\n",
+					pmb->u.mb.mbxCommand,
+					pmb->u.mb.mbxStatus);
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	/* Initialize ERATT handling flag */
+	phba->hba_flag &= ~HBA_ERATT_HANDLED;
+
+	/* Enable appropriate host interrupts */
+	if (lpfc_readl(phba->HCregaddr, &status)) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EIO;
+	}
+	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
+	if (psli->num_rings > 0)
+		status |= HC_R0INT_ENA;
+	if (psli->num_rings > 1)
+		status |= HC_R1INT_ENA;
+	if (psli->num_rings > 2)
+		status |= HC_R2INT_ENA;
+	if (psli->num_rings > 3)
+		status |= HC_R3INT_ENA;
+
+	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
+	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
+		status &= ~(HC_R0INT_ENA);
+
+	writel(status, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Set up ring-0 (ELS) timer */
+	timeout = phba->fc_ratov * 2;
+	mod_timer(&vport->els_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * timeout));
+	/* Set up heart beat (HB) timer */
+	mod_timer(&phba->hb_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+	phba->hb_outstanding = 0;
+	phba->last_completion_time = jiffies;
+	/* Set up error attention (ERATT) polling timer */
+	mod_timer(&phba->eratt_poll,
+		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+
+	if (phba->hba_flag & LINK_DISABLED) {
+		lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2598 Adapter Link is disabled.\n");
+		lpfc_down_link(phba, pmb);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+			lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2599 Adapter failed to issue DOWN_LINK"
+			" mbox command rc 0x%x\n", rc);
+
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+		if (rc)
+			return rc;
+	}
+	/* MBOX buffer will be freed in mbox compl */
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
+	pmb->mbox_cmpl = lpfc_config_async_cmpl;
+	pmb->vport = phba->pport;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba,
+				KERN_ERR,
+				LOG_INIT,
+				"0456 Adapter failed to issue "
+				"ASYNCEVT_ENABLE mbox status x%x\n",
+				rc);
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
+	/* Get Option rom version */
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_dump_wakeup_param(phba, pmb);
+	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
+	pmb->vport = phba->pport;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+				"to get Option ROM version status x%x\n", rc);
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
+	return 0;
+}
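+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * When the VPD carries no serial number, lpfc_config_port_post() above
+ * synthesizes one from the low 6 bytes of the WWNN: each byte is split
+ * into two nibbles, and each nibble becomes '0'-'9' (0x30 + j) or
+ * 'a'-'f' (0x61 + j - 10), i.e. lowercase hex. An equivalent
+ * standalone form:
+ */
+#if 0
+#include <stdint.h>
+
+static void ieee_to_serial(const uint8_t ieee[6], char serial[13])
+{
+	static const char hex[] = "0123456789abcdef";
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		serial[2 * i]     = hex[ieee[i] >> 4];	/* high nibble */
+		serial[2 * i + 1] = hex[ieee[i] & 0xf];	/* low nibble */
+	}
+	serial[12] = '\0';
+}
+#endif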
+
+/**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+static int
+lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
+{
+	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *              0 - success
+ *              Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+			       uint32_t flag)
+{
+	struct lpfc_vport *vport = phba->pport;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+	pmb->vport = vport;
+
+	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+	     !(phba->lmt & LMT_1Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+	     !(phba->lmt & LMT_2Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+	     !(phba->lmt & LMT_4Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+	     !(phba->lmt & LMT_8Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+	     !(phba->lmt & LMT_10Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+	     !(phba->lmt & LMT_16Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
+	     !(phba->lmt & LMT_32Gb))) {
+		/* Reset link speed to auto */
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+			"1302 Invalid speed for this board:%d "
+			"Reset link speed to auto.\n",
+			phba->cfg_link_speed);
+		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+	}
+	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_set_loopback_flag(phba);
+	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0498 Adapter failed to init, mbxCmd x%x "
+			"INIT_LINK, mbxStatus x%x\n",
+			mb->mbxCommand, mb->mbxStatus);
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+			/* Clear all pending interrupts */
+			writel(0xffffffff, phba->HAregaddr);
+			readl(phba->HAregaddr); /* flush */
+		}
+		phba->link_state = LPFC_HBA_ERROR;
+		if (rc != MBX_BUSY || flag == MBX_POLL)
+			mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
+	if (flag == MBX_POLL)
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+	return 0;
+}
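+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * The long condition above rejects any configured link speed whose
+ * Link Module Type bit is missing from phba->lmt and falls back to
+ * autonegotiation. The same check written table-driven; the speed
+ * values and LMT bit positions below are placeholders, not the
+ * driver's real encodings:
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+struct speed_cap {
+	unsigned int cfg_speed;	/* user-requested speed in Gb */
+	uint32_t lmt_bit;	/* capability bit reported by the HBA */
+};
+
+static const struct speed_cap caps[] = {
+	{ 1,  1u << 0 }, { 2,  1u << 1 }, { 4,  1u << 2 },
+	{ 8,  1u << 3 }, { 10, 1u << 4 }, { 16, 1u << 5 },
+	{ 32, 1u << 6 },
+};
+
+/* Returns true when 'cfg_speed' (0 = auto) is supported by 'lmt'. */
+static bool speed_supported(unsigned int cfg_speed, uint32_t lmt)
+{
+	size_t i;
+
+	if (cfg_speed == 0)
+		return true;		/* auto is always acceptable */
+	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
+		if (caps[i].cfg_speed == cfg_speed)
+			return (lmt & caps[i].lmt_bit) != 0;
+	return false;			/* unknown speed: reset to auto */
+}
+#endif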
+
+/**
+ * lpfc_hba_down_link - this routine downs the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use to stop the link.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+static int
+lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
+{
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"0491 Adapter Link is disabled.\n");
+	lpfc_down_link(phba, pmb);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
+	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+		lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"2522 Adapter failed to issue DOWN_LINK"
+		" mbox command rc 0x%x\n", rc);
+
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	if (flag == MBX_POLL)
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do LPFC uninitialization before the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_hba_down_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		/* Disable interrupts */
+		writel(0, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	if (phba->pport->load_flag & FC_UNLOADING)
+		lpfc_cleanup_discovery_resources(phba->pport);
+	else {
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports != NULL)
+			for (i = 0; i <= phba->max_vports &&
+				vports[i] != NULL; i++)
+				lpfc_cleanup_discovery_resources(vports[i]);
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free the
+ * rspiocbs which got deferred
+ *
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup completed slow path events after HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ *   void.
+ **/
+static void
+lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *rspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irq(&phba->hbalock);
+
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+		/* Get the response iocb from the head of work queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			lpfc_sli_release_iocbq(phba, rspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+		case CQE_CODE_RECEIVE_V1:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		}
+	}
+}
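+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * The switch above recovers the enclosing lpfc_iocbq or hbq_dmabuf
+ * from its embedded lpfc_cq_event using container_of(). The macro is
+ * just pointer arithmetic from the member back to the start of the
+ * containing structure:
+ */
+#if 0
+#include <stddef.h>
+
+#define container_of_sketch(ptr, type, member) \
+	((type *)((char *)(ptr) - offsetof(type, member)))
+
+struct cq_event_sketch { int code; };
+
+struct iocbq_sketch {
+	int tag;
+	struct cq_event_sketch evt;	/* embedded member */
+};
+
+static struct iocbq_sketch *iocbq_from_event(struct cq_event_sketch *evt)
+{
+	/* Subtract the member offset to get back to the container. */
+	return container_of_sketch(evt, struct iocbq_sketch, evt);
+}
+#endif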
+
+/**
+ * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup posted ELS buffers after the HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ *   void.
+ **/
+static void
+lpfc_hba_free_post_buf(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_dmabuf *mp, *next_mp;
+	LIST_HEAD(buflist);
+	int count;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+		lpfc_sli_hbqbuf_free_all(phba);
+	else {
+		/* Cleanup preposted buffers on the ELS ring */
+		pring = &psli->sli3_ring[LPFC_ELS_RING];
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&pring->postbufq, &buflist);
+		spin_unlock_irq(&phba->hbalock);
+
+		count = 0;
+		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
+			list_del(&mp->list);
+			count++;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+
+		spin_lock_irq(&phba->hbalock);
+		pring->postbufq_cnt -= count;
+		spin_unlock_irq(&phba->hbalock);
+	}
+}
+
+/**
+ * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup the txcmplq after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   void
+ **/
+static void
+lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_queue *qp = NULL;
+	struct lpfc_sli_ring *pring;
+	LIST_HEAD(completions);
+	int i;
+
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		for (i = 0; i < psli->num_rings; i++) {
+			pring = &psli->sli3_ring[i];
+			spin_lock_irq(&phba->hbalock);
+			/* At this point in time the HBA is either reset or DOA.
+			 * Nothing should be on txcmplq as it will
+			 * NEVER complete.
+			 */
+			list_splice_init(&pring->txcmplq, &completions);
+			pring->txcmplq_cnt = 0;
+			spin_unlock_irq(&phba->hbalock);
+
+			lpfc_sli_abort_iocb_ring(phba, pring);
+		}
+		/* Cancel all the IOCBs from the completions list */
+		lpfc_sli_cancel_iocbs(phba, &completions,
+				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+		return;
+	}
+	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+		pring = qp->pring;
+		if (!pring)
+			continue;
+		spin_lock_irq(&pring->ring_lock);
+		list_splice_init(&pring->txcmplq, &completions);
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&pring->ring_lock);
+		lpfc_sli_abort_iocb_ring(phba, pring);
+	}
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+}
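+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * lpfc_hba_clean_txcmplq() above uses a common kernel idiom: splice
+ * the whole shared queue onto a private list while holding the lock,
+ * then walk and cancel the entries with the lock dropped, so per-entry
+ * work never runs under the spinlock. Skeleton of the idiom with
+ * placeholder types:
+ */
+#if 0
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct item {
+	struct list_head list;
+	/* payload ... */
+};
+
+static void drain_queue(struct list_head *shared, spinlock_t *lock)
+{
+	struct item *it, *next;
+	LIST_HEAD(private);
+
+	spin_lock_irq(lock);
+	list_splice_init(shared, &private);	/* 'shared' is now empty */
+	spin_unlock_irq(lock);
+
+	/* Lock dropped: the spliced entries belong to this thread only,
+	 * so per-entry cleanup may take other locks safely. */
+	list_for_each_entry_safe(it, next, &private, list) {
+		list_del(&it->list);
+		/* ... cancel or free 'it' here ... */
+	}
+}
+#endif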
+
+/**
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
+{
+	lpfc_hba_free_post_buf(phba);
+	lpfc_hba_clean_txcmplq(phba);
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb, *psb_next;
+	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+	LIST_HEAD(aborts);
+	LIST_HEAD(nvme_aborts);
+	LIST_HEAD(nvmet_aborts);
+	unsigned long iflag = 0;
+	struct lpfc_sglq *sglq_entry = NULL;
+
+
+	lpfc_sli_hbqbuf_free_all(phba);
+	lpfc_hba_clean_txcmplq(phba);
+
+	/* At this point in time the HBA is either reset or DOA. Either
+	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
+	 * on the lpfc_els_sgl_list so that it can either be freed if the
+	 * driver is unloading or reposted if the driver is restarting
+	 * the port.
+	 */
+	spin_lock_irq(&phba->hbalock);  /* required for lpfc_els_sgl_list and */
+					/* scsi_buf_list */
+	/* sgl_list_lock required because worker thread uses this
+	 * list.
+	 */
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
+
+	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
+			&phba->sli4_hba.lpfc_els_sgl_list);
+
+
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	/* abts_scsi_buf_list_lock required because worker thread uses this
+	 * list.
+	 */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+				 &aborts);
+		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	}
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
+				 &nvme_aborts);
+		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				 &nvmet_aborts);
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+		psb->pCmd = NULL;
+		psb->status = IOSTAT_SUCCESS;
+	}
+	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+			psb->pCmd = NULL;
+			psb->status = IOSTAT_SUCCESS;
+		}
+		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
+		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+
+		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
+			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
+			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+		}
+	}
+
+	lpfc_sli4_free_sp_events(phba);
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_post - Wrapper func for hba down post routine
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 routine for performing
+ * uninitialization after the HBA is reset when bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_hba_down_post(struct lpfc_hba *phba)
+{
+	return (*phba->lpfc_hba_down_post)(phba);
+}
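+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * lpfc_hba_down_post() is a thin wrapper over a per-revision method
+ * pointer: during setup the driver points lpfc_hba_down_post at either
+ * the _s3 (SLI-3) or _s4 (SLI-4) variant, so callers stay
+ * revision-agnostic. The shape of that dispatch pattern:
+ */
+#if 0
+struct hba_sketch;
+
+struct hba_ops_sketch {
+	int (*down_post)(struct hba_sketch *);
+};
+
+static int down_post_s3_sketch(struct hba_sketch *h) { return 0; }
+static int down_post_s4_sketch(struct hba_sketch *h) { return 0; }
+
+/* Chosen once, at attach time, from the detected SLI revision. */
+static void bind_ops(struct hba_ops_sketch *ops, int sli_rev)
+{
+	ops->down_post = (sli_rev == 4) ? down_post_s4_sketch
+					: down_post_s3_sketch;
+}
+#endif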
+
+/**
+ * lpfc_hb_timeout - The HBA-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the HBA-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, a HBA timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
+ * be performed in the timeout handler and the HBA timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_hb_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	phba = (struct lpfc_hba *)ptr;
+
+	/* Check for heart beat timeout conditions */
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_HB_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+	/* Tell the worker thread there is work to do */
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_rrq_timeout - The RRQ-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the RRQ-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_rrq_handler. Any periodical operations will
+ * be performed in the timeout handler and the RRQ timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_rrq_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	unsigned long iflag;
+
+	phba = (struct lpfc_hba *)ptr;
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		phba->hba_flag |= HBA_RRQ_ACTIVE;
+	else
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the callback function to the lpfc heart-beat mailbox command.
+ * If configured, the lpfc driver issues the heart-beat mailbox command to
+ * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
+ * heart-beat mailbox command is issued, the driver shall set up heart-beat
+ * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
+ * heart-beat outstanding state. Once the mailbox command comes back and
+ * no error conditions detected, the heart-beat mailbox command timer is
+ * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
+ * state is cleared for the next heart-beat. If the timer expired with the
+ * heart-beat outstanding state set, the driver will put the HBA offline.
+ **/
+static void
+lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+	unsigned long drvr_flag;
+
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	phba->hb_outstanding = 0;
+	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+	/* Check and reset heart-beat timer if necessary */
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
+		!(phba->link_state == LPFC_HBA_ERROR) &&
+		!(phba->pport->load_flag & FC_UNLOADING))
+		mod_timer(&phba->hb_tmofunc,
+			  jiffies +
+			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+	return;
+}
+
+/**
+ * lpfc_hb_timeout_handler - The HBA-timer timeout handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is the actual HBA-timer timeout handler to be invoked by the worker
+ * thread whenever the HBA timer fired and HBA-timeout event posted. This
+ * handler performs any periodic operations needed for the device. If such a
+ * periodic event has already been attended to either in the interrupt handler
+ * or by processing slow-ring or fast-ring events within the HBA-timer
+ * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
+ * the timer for the next timeout period. If lpfc heart-beat mailbox command
+ * is configured and there is no heart-beat mailbox command outstanding, a
+ * heart-beat mailbox is issued and timer set properly. Otherwise, if there
+ * has been a heart-beat mailbox command outstanding, the HBA shall be put
+ * offline.
+ **/
+void
+lpfc_hb_timeout_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	LPFC_MBOXQ_t *pmboxq;
+	struct lpfc_dmabuf *buf_ptr;
+	int retval, i;
+	struct lpfc_sli *psli = &phba->sli;
+	LIST_HEAD(completions);
+	struct lpfc_queue *qp;
+	unsigned long time_elapsed;
+	uint32_t tick_cqe, max_cqe, val;
+	uint64_t tot, data1, data2, data3;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_register reg_data;
+	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			lpfc_rcv_seq_check_edtov(vports[i]);
+			lpfc_fdmi_num_disc_check(vports[i]);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+		(phba->pport->load_flag & FC_UNLOADING) ||
+		(phba->pport->fc_flag & FC_OFFLINE_MODE))
+		return;
+
+	if (phba->cfg_auto_imax) {
+		if (!phba->last_eqdelay_time) {
+			phba->last_eqdelay_time = jiffies;
+			goto skip_eqdelay;
+		}
+		time_elapsed = jiffies - phba->last_eqdelay_time;
+		phba->last_eqdelay_time = jiffies;
+
+		tot = 0xffff;
+		/* Check outstanding IO count */
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			if (phba->nvmet_support) {
+				tgtp = phba->targetport->private;
+				/* Calculate outstanding IOs */
+				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
+				tot += atomic_read(&tgtp->xmt_fcp_release);
+				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
+			} else {
+				tot = atomic_read(&phba->fc4NvmeIoCmpls);
+				data1 = atomic_read(
+					&phba->fc4NvmeInputRequests);
+				data2 = atomic_read(
+					&phba->fc4NvmeOutputRequests);
+				data3 = atomic_read(
+					&phba->fc4NvmeControlRequests);
+				tot = (data1 + data2 + data3) - tot;
+			}
+		}
+
+		/* Interrupts per sec per EQ */
+		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
+
+		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
+		max_cqe = time_elapsed * tick_cqe;
+
+		for (i = 0; i < phba->io_channel_irqs; i++) {
+			/* Fast-path EQ */
+			qp = phba->sli4_hba.hba_eq[i];
+			if (!qp)
+				continue;
+
+			/* Use no EQ delay if we don't have many outstanding
+			 * IOs, or if we are only processing 1 CQE/ISR or less.
+			 * Otherwise, assume we can process up to lpfc_fcp_imax
+			 * interrupts per HBA.
+			 */
+			if (tot < LPFC_NODELAY_MAX_IO ||
+			    qp->EQ_cqe_cnt <= max_cqe)
+				val = 0;
+			else
+				val = phba->cfg_fcp_imax;
+
+			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+				/* Use EQ Delay Register method */
+
+				/* Convert for EQ Delay register */
+				if (val) {
+					/* First, interrupts per sec per EQ */
+					val = phba->cfg_fcp_imax /
+						phba->io_channel_irqs;
+
+					/* us delay between each interrupt */
+					val = LPFC_SEC_TO_USEC / val;
+				}
+				if (val != qp->q_mode) {
+					reg_data.word0 = 0;
+					bf_set(lpfc_sliport_eqdelay_id,
+					       &reg_data, qp->queue_id);
+					bf_set(lpfc_sliport_eqdelay_delay,
+					       &reg_data, val);
+					writel(reg_data.word0, eqdreg);
+				}
+			} else {
+				/* Use mbox command method */
+				if (val != qp->q_mode)
+					lpfc_modify_hba_eq_delay(phba, i,
+								 1, val);
+			}
+
+			/*
+			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
+			 * between interrupts for EQDR.
+			 */
+			qp->q_mode = val;
+			qp->EQ_cqe_cnt = 0;
+		}
+	}
+
+skip_eqdelay:
+	spin_lock_irq(&phba->pport->work_port_lock);
+
+	if (time_after(phba->last_completion_time +
+			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+			jiffies)) {
+		spin_unlock_irq(&phba->pport->work_port_lock);
+		if (!phba->hb_outstanding)
+			mod_timer(&phba->hb_tmofunc,
+				jiffies +
+				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+		else
+			mod_timer(&phba->hb_tmofunc,
+				jiffies +
+				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+		return;
+	}
+	spin_unlock_irq(&phba->pport->work_port_lock);
+
+	if (phba->elsbuf_cnt &&
+		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&phba->elsbuf, &completions);
+		phba->elsbuf_cnt = 0;
+		phba->elsbuf_prev_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		while (!list_empty(&completions)) {
+			list_remove_head(&completions, buf_ptr,
+				struct lpfc_dmabuf, list);
+			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+			kfree(buf_ptr);
+		}
+	}
+	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
+
+	/* If there is no heart beat outstanding, issue a heartbeat command */
+	if (phba->cfg_enable_hba_heartbeat) {
+		if (!phba->hb_outstanding) {
+			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
+				(list_empty(&psli->mboxq))) {
+				pmboxq = mempool_alloc(phba->mbox_mem_pool,
+							GFP_KERNEL);
+				if (!pmboxq) {
+					mod_timer(&phba->hb_tmofunc,
+						 jiffies +
+						 msecs_to_jiffies(1000 *
+						 LPFC_HB_MBOX_INTERVAL));
+					return;
+				}
+
+				lpfc_heart_beat(phba, pmboxq);
+				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+				pmboxq->vport = phba->pport;
+				retval = lpfc_sli_issue_mbox(phba, pmboxq,
+						MBX_NOWAIT);
+
+				if (retval != MBX_BUSY &&
+					retval != MBX_SUCCESS) {
+					mempool_free(pmboxq,
+							phba->mbox_mem_pool);
+					mod_timer(&phba->hb_tmofunc,
+						jiffies +
+						msecs_to_jiffies(1000 *
+						LPFC_HB_MBOX_INTERVAL));
+					return;
+				}
+				phba->skipped_hb = 0;
+				phba->hb_outstanding = 1;
+			} else if (time_before_eq(phba->last_completion_time,
+					phba->skipped_hb)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2857 Last completion time not "
+					"updated in %d ms\n",
+					jiffies_to_msecs(jiffies
+						 - phba->last_completion_time));
+			} else
+				phba->skipped_hb = jiffies;
+
+			mod_timer(&phba->hb_tmofunc,
+				 jiffies +
+				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+			return;
+		} else {
+			/*
+			* If heart beat timeout called with hb_outstanding set
+			* we need to give the hb mailbox cmd a chance to
+			* complete or TMO.
+			*/
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0459 Adapter heartbeat still out"
+					"standing: last compl time was %d ms.\n",
+					jiffies_to_msecs(jiffies
+						 - phba->last_completion_time));
+			mod_timer(&phba->hb_tmofunc,
+				jiffies +
+				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+		}
+	} else {
+		mod_timer(&phba->hb_tmofunc,
+			jiffies +
+			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+	}
+}
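+
+/*
+ * [Editor's note: illustrative worked example, not part of the
+ * driver.] The auto-imax block above sizes EQ interrupt coalescing
+ * from three inputs: cfg_fcp_imax (total interrupts/sec),
+ * io_channel_irqs (number of EQs) and the jiffies elapsed since the
+ * last pass. With assumed values cfg_fcp_imax = 150000,
+ * io_channel_irqs = 4, CONFIG_HZ = 250 and time_elapsed = 5000 ticks:
+ *
+ *   val      = 150000 / 4  = 37500   interrupts/sec per EQ
+ *   tick_cqe = 37500 / 250 = 150     CQEs per tick per EQ
+ *   max_cqe  = 5000 * 150  = 750000  CQEs allowed in the window
+ *
+ * An EQ whose EQ_cqe_cnt stayed at or below 750000 (or any EQ while
+ * total outstanding I/O is under LPFC_NODELAY_MAX_IO) runs with no
+ * delay (val = 0); otherwise, assuming LPFC_SEC_TO_USEC is 1000000,
+ * the delay register is programmed with 1000000 / 37500, about 26 us
+ * between interrupts.
+ */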
+
+/**
+ * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring the HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_offline_eratt(struct lpfc_hba *phba)
+{
+	struct lpfc_sli   *psli = &phba->sli;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+
+	lpfc_offline(phba);
+	lpfc_reset_barrier(phba);
+	spin_lock_irq(&phba->hbalock);
+	lpfc_sli_brdreset(phba);
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_hba_down_post(phba);
+	lpfc_sli_brdready(phba, HS_MBRDY);
+	lpfc_unblock_mgmt_io(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+	return;
+}
+
+/**
+ * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring a SLI4 HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+void
+lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_HBA_ERROR;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+	lpfc_offline(phba);
+	lpfc_hba_down_post(phba);
+	lpfc_unblock_mgmt_io(phba);
+}
+
+/**
+ * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the deferred HBA hardware error
+ * conditions. This type of error is indicated by the HBA setting ER1
+ * and another ER bit in the host status register. The driver will
+ * wait until the ER1 bit clears before handling the error condition.
+ **/
+static void
+lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
+{
+	uint32_t old_host_status = phba->work_hs;
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* If the pci channel is offline, ignore possible errors,
+	 * since we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev)) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~DEFER_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		"0479 Deferred Adapter Hardware Error "
+		"Data: x%x x%x x%x\n",
+		phba->work_hs,
+		phba->work_status[0], phba->work_status[1]);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+
+	/*
+	 * Firmware stops when it triggers erratt. That could cause the I/Os
+	 * to be dropped by the firmware. Error out the iocbs (I/O) on the
+	 * txcmplq and let the SCSI layer retry them after re-establishing link.
+	 */
+	lpfc_sli_abort_fcp_rings(phba);
+
+	/*
+	 * There was a firmware error. Take the hba offline and then
+	 * attempt to restart it.
+	 */
+	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	lpfc_offline(phba);
+
+	/* Wait for the ER1 bit to clear.*/
+	while (phba->work_hs & HS_FFER1) {
+		msleep(100);
+		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
+			phba->work_hs = UNPLUG_ERR;
+			break;
+		}
+		/* If driver is unloading let the worker thread continue */
+		if (phba->pport->load_flag & FC_UNLOADING) {
+			phba->work_hs = 0;
+			break;
+		}
+	}
+
+	/*
+	 * This is to protect against a race condition in which
+	 * the first write to the host attention register clears
+	 * the host status register.
+	 */
+	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
+		phba->work_hs = old_host_status & ~HS_FFER1;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~DEFER_ERATT;
+	spin_unlock_irq(&phba->hbalock);
+	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+}
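+
+/*
+ * [Editor's note: illustrative sketch, not part of the lpfc driver.]
+ * The ER1 wait above is an unbounded poll with two escape hatches:
+ * a failed register read and a driver unload in progress. A generic
+ * shape of that poll-until-clear loop, kernel-style; the names are
+ * placeholders:
+ */
+#if 0
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
+static int wait_bit_clear(void __iomem *reg, u32 bit, bool (*abort)(void))
+{
+	u32 val;
+
+	for (;;) {
+		val = readl(reg);
+		if (!(val & bit))
+			return 0;	/* condition cleared */
+		if (abort())
+			return -EINTR;	/* e.g. driver unloading */
+		msleep(100);		/* poll interval, as above */
+	}
+}
+#endif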
+
+static void
+lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
+{
+	struct lpfc_board_event_header board_event;
+	struct Scsi_Host *shost;
+
+	board_event.event_type = FC_REG_BOARD_EVENT;
+	board_event.subcategory = LPFC_EVENT_PORTINTERR;
+	shost = lpfc_shost_from_vport(phba->pport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(board_event),
+				  (char *) &board_event,
+				  LPFC_NL_VENDOR_ID);
+}
+
+/**
+ * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the following HBA hardware error
+ * conditions:
+ * 1 - HBA error attention interrupt
+ * 2 - DMA ring index out of range
+ * 3 - Mailbox command came back as unknown
+ **/
+static void
+lpfc_handle_eratt_s3(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_sli   *psli = &phba->sli;
+	uint32_t event_data;
+	unsigned long temperature;
+	struct temp_event temp_event_data;
+	struct Scsi_Host  *shost;
+
+	/* If the pci channel is offline, ignore possible errors,
+	 * since we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev)) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~DEFER_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* If resets are disabled then leave the HBA alone and return */
+	if (!phba->cfg_enable_hba_reset)
+		return;
+
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	if (phba->hba_flag & DEFER_ERATT)
+		lpfc_handle_deferred_eratt(phba);
+
+	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
+		if (phba->work_hs & HS_FFER6)
+			/* Re-establishing Link */
+			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+					"1301 Re-establishing Link "
+					"Data: x%x x%x x%x\n",
+					phba->work_hs, phba->work_status[0],
+					phba->work_status[1]);
+		if (phba->work_hs & HS_FFER8)
+			/* Device Zeroization */
+			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+					"2861 Host Authentication device "
+					"zeroization Data:x%x x%x x%x\n",
+					phba->work_hs, phba->work_status[0],
+					phba->work_status[1]);
+
+		spin_lock_irq(&phba->hbalock);
+		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+
+		/*
+		* Firmware stops when it triggers erratt with HS_FFER6.
+		* That could cause the I/Os to be dropped by the firmware.
+		* Error out the iocbs (I/O) on the txcmplq and let the SCSI
+		* layer retry them after re-establishing link.
+		*/
+		lpfc_sli_abort_fcp_rings(phba);
+
+		/*
+		 * There was a firmware error.  Take the hba offline and then
+		 * attempt to restart it.
+		 */
+		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+		lpfc_offline(phba);
+		lpfc_sli_brdrestart(phba);
+		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return;
+		}
+		lpfc_unblock_mgmt_io(phba);
+	} else if (phba->work_hs & HS_CRIT_TEMP) {
+		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		temp_event_data.event_code = LPFC_CRIT_TEMP;
+		temp_event_data.data = (uint32_t)temperature;
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0406 Adapter maximum temperature exceeded "
+				"(%ld), taking this port offline "
+				"Data: x%x x%x x%x\n",
+				temperature, phba->work_hs,
+				phba->work_status[0], phba->work_status[1]);
+
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+					  sizeof(temp_event_data),
+					  (char *) &temp_event_data,
+					  SCSI_NL_VID_TYPE_PCI
+					  | PCI_VENDOR_ID_EMULEX);
+
+		spin_lock_irq(&phba->hbalock);
+		phba->over_temp_state = HBA_OVER_TEMP;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_offline_eratt(phba);
+
+	} else {
+		/* The if clause above forces this code path when the status
+		 * failure is a value other than FFER6. Do not call the offline
+		 * twice. This is the adapter hardware error path.
+		 */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0457 Adapter Hardware Error "
+				"Data: x%x x%x x%x\n",
+				phba->work_hs,
+				phba->work_status[0], phba->work_status[1]);
+
+		event_data = FC_REG_DUMP_EVENT;
+		shost = lpfc_shost_from_vport(vport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+				sizeof(event_data), (char *) &event_data,
+				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+		lpfc_offline_eratt(phba);
+	}
+	return;
+}
+
+/**
+ * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
+ * @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox shutdown action.
+ *
+ * This routine is invoked to perform an SLI4 port PCI function reset in
+ * response to port status register polling attention. It waits for port
+ * status register (ERR, RDY, RN) bits before proceeding with function reset.
+ * During this process, interrupt vectors are freed and later requested
+ * for handling possible port resource change.
+ **/
+static int
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
+			    bool en_rn_msg)
+{
+	int rc;
+	uint32_t intr_mode;
+
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		/*
+		 * On error status condition, the driver needs to wait for port
+		 * ready before performing reset.
+		 */
+		rc = lpfc_sli4_pdev_status_reg_wait(phba);
+		if (rc)
+			return rc;
+	}
+
+	/* need reset: attempt for port recovery */
+	if (en_rn_msg)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2887 Reset Needed: Attempting Port "
+				"Recovery...\n");
+	lpfc_offline_prep(phba, mbx_action);
+	lpfc_offline(phba);
+	/* release interrupt for possible resource change */
+	lpfc_sli4_disable_intr(phba);
+	rc = lpfc_sli_brdrestart(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6309 Failed to restart board\n");
+		return rc;
+	}
+	/* request and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3175 Failed to enable interrupt\n");
+		return -EIO;
+	}
+	phba->intr_mode = intr_mode;
+	rc = lpfc_online(phba);
+	if (rc == 0)
+		lpfc_unblock_mgmt_io(phba);
+
+	return rc;
+}
+
+/**
+ * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the SLI4 HBA hardware error attention
+ * conditions.
+ **/
+static void
+lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	uint32_t event_data;
+	struct Scsi_Host *shost;
+	uint32_t if_type;
+	struct lpfc_register portstat_reg = {0};
+	uint32_t reg_err1, reg_err2;
+	uint32_t uerrlo_reg, uemasklo_reg;
+	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
+	bool en_rn_msg = true;
+	struct temp_event temp_event_data;
+	struct lpfc_register portsmphr_reg;
+	int rc, i;
+
+	/* If the pci channel is offline, ignore possible errors, since
+	 * we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev))
+		return;
+
+	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UERRLOregaddr,
+				&uerrlo_reg);
+		pci_rd_rc2 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+				&uemasklo_reg);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+			return;
+		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
+			lpfc_sli4_offline_eratt(phba);
+			return;
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"7623 Checking UE recoverable");
+
+		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
+			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+				       &portsmphr_reg.word0))
+				continue;
+
+			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
+						   &portsmphr_reg);
+			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
+			    LPFC_PORT_SEM_UE_RECOVERABLE)
+				break;
+			/* Sleep for 1 sec before checking the SEMAPHORE */
+			msleep(1000);
+		}
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"4827 smphr_port_status x%x : Waited %dSec",
+				smphr_port_status, i);
+
+		/* Recoverable UE, reset the HBA device */
+		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
+		    LPFC_PORT_SEM_UE_RECOVERABLE) {
+			for (i = 0; i < 20; i++) {
+				msleep(1000);
+				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+				    &portsmphr_reg.word0) &&
+				    (LPFC_POST_STAGE_PORT_READY ==
+				     bf_get(lpfc_port_smphr_port_status,
+				     &portsmphr_reg))) {
+					rc = lpfc_sli4_port_sta_fn_reset(phba,
+						LPFC_MBX_NO_WAIT, en_rn_msg);
+					if (rc == 0)
+						return;
+					lpfc_printf_log(phba,
+						KERN_ERR, LOG_INIT,
+						"4215 Failed to recover UE");
+					break;
+				}
+			}
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"7624 Firmware not ready: Failing UE recovery,"
+				" waited %dSec", i);
+		lpfc_sli4_offline_eratt(phba);
+		break;
+
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&portstat_reg.word0);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3151 PCI bus read access failure: x%x\n",
+				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+			return;
+		}
+		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2889 Port Overtemperature event, "
+				"taking port offline Data: x%x x%x\n",
+				reg_err1, reg_err2);
+
+			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
+			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+			temp_event_data.event_code = LPFC_CRIT_TEMP;
+			temp_event_data.data = 0xFFFFFFFF;
+
+			shost = lpfc_shost_from_vport(phba->pport);
+			fc_host_post_vendor_event(shost, fc_get_event_number(),
+						  sizeof(temp_event_data),
+						  (char *)&temp_event_data,
+						  SCSI_NL_VID_TYPE_PCI
+						  | PCI_VENDOR_ID_EMULEX);
+
+			spin_lock_irq(&phba->hbalock);
+			phba->over_temp_state = HBA_OVER_TEMP;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli4_offline_eratt(phba);
+			return;
+		}
+		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3143 Port Down: Firmware Update "
+					"Detected\n");
+			en_rn_msg = false;
+		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3144 Port Down: Debug Dump\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3145 Port Down: Provisioning\n");
+
+		/* If resets are disabled then leave the HBA alone and return */
+		if (!phba->cfg_enable_hba_reset)
+			return;
+
+		/* Check port status register for function reset */
+		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
+				en_rn_msg);
+		if (rc == 0) {
+			/* don't report event on forced debug dump */
+			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+				return;
+			else
+				break;
+		}
+		/* fall through for not able to recover */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3152 Unrecoverable error, bring the port "
+				"offline\n");
+		lpfc_sli4_offline_eratt(phba);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"3123 Report dump event to upper layer\n");
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	event_data = FC_REG_DUMP_EVENT;
+	shost = lpfc_shost_from_vport(vport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(event_data), (char *) &event_data,
+				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+}
+
+/**
+ * lpfc_handle_eratt - Wrapper func for handling hba error attention
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba error attention handling
+ * routine from the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+	(*phba->lpfc_handle_eratt)(phba);
+}
+
+/**
+ * lpfc_handle_latt - The HBA link event handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked from the worker thread to handle a HBA host
+ * attention link event. SLI3 only.
+ **/
+void
+lpfc_handle_latt(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_sli   *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	volatile uint32_t control;
+	struct lpfc_dmabuf *mp;
+	int rc = 0;
+
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		rc = 1;
+		goto lpfc_handle_latt_err_exit;
+	}
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		rc = 2;
+		goto lpfc_handle_latt_free_pmb;
+	}
+
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		rc = 3;
+		goto lpfc_handle_latt_free_mp;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	psli->slistat.link_event++;
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = vport;
+	/* Block ELS IOCBs until we have processed this mbox command */
+	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = 4;
+		goto lpfc_handle_latt_free_mbuf;
+	}
+
+	/* Clear Link Attention in HA REG */
+	spin_lock_irq(&phba->hbalock);
+	writel(HA_LATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+
+lpfc_handle_latt_free_mbuf:
+	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+lpfc_handle_latt_free_mp:
+	kfree(mp);
+lpfc_handle_latt_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+lpfc_handle_latt_err_exit:
+	/* Enable Link attention interrupts */
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	control = readl(phba->HCregaddr);
+	control |= HC_LAINT_ENA;
+	writel(control, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* Clear Link Attention in HA REG */
+	writel(HA_LATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_linkdown(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
+
+	return;
+}
+
+/**
+ * lpfc_parse_vpd - Parse VPD (Vital Product Data)
+ * @phba: pointer to lpfc hba data structure.
+ * @vpd: pointer to the vital product data.
+ * @len: length of the vital product data in bytes.
+ *
+ * This routine parses the Vital Product Data (VPD), treating it as an
+ * array of characters, and populates the ModelName, ProgramType,
+ * ModelDesc, and related fields of the phba data structure.
+ *
+ * Return codes
+ *   0 - pointer to the VPD passed in is NULL
+ *   1 - success
+ **/
+int
+lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
+{
+	uint8_t lenlo, lenhi;
+	int Length;
+	int i, j;
+	int finished = 0;
+	int index = 0;
+
+	if (!vpd)
+		return 0;
+
+	/* Vital Product */
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0455 Vital Product Data: x%x x%x x%x x%x\n",
+			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
+			(uint32_t) vpd[3]);
+	while (!finished && (index < (len - 4))) {
+		switch (vpd[index]) {
+		case 0x82:
+		case 0x91:
+			index += 1;
+			lenlo = vpd[index];
+			index += 1;
+			lenhi = vpd[index];
+			index += 1;
+			i = ((((unsigned short)lenhi) << 8) + lenlo);
+			index += i;
+			break;
+		case 0x90:
+			index += 1;
+			lenlo = vpd[index];
+			index += 1;
+			lenhi = vpd[index];
+			index += 1;
+			Length = ((((unsigned short)lenhi) << 8) + lenlo);
+			if (Length > len - index)
+				Length = len - index;
+			while (Length > 0) {
+			/* Look for Serial Number */
+			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->SerialNumber[j++] = vpd[index++];
+					if (j == 31)
+						break;
+				}
+				phba->SerialNumber[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
+				phba->vpd_flag |= VPD_MODEL_DESC;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->ModelDesc[j++] = vpd[index++];
+					if (j == 255)
+						break;
+				}
+				phba->ModelDesc[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
+				phba->vpd_flag |= VPD_MODEL_NAME;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->ModelName[j++] = vpd[index++];
+					if (j == 79)
+						break;
+				}
+				phba->ModelName[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
+				phba->vpd_flag |= VPD_PROGRAM_TYPE;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->ProgramType[j++] = vpd[index++];
+					if (j == 255)
+						break;
+				}
+				phba->ProgramType[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
+				phba->vpd_flag |= VPD_PORT;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					if ((phba->sli_rev == LPFC_SLI_REV4) &&
+					    (phba->sli4_hba.pport_name_sta ==
+					     LPFC_SLI4_PPNAME_GET)) {
+						j++;
+						index++;
+					} else
+						phba->Port[j++] = vpd[index++];
+					if (j == 19)
+						break;
+				}
+				if ((phba->sli_rev != LPFC_SLI_REV4) ||
+				    (phba->sli4_hba.pport_name_sta ==
+				     LPFC_SLI4_PPNAME_NON))
+					phba->Port[j] = 0;
+				continue;
+			}
+			else {
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				index += i;
+				Length -= (3 + i);
+			}
+		}
+		finished = 0;
+		break;
+		case 0x78:
+			finished = 1;
+			break;
+		default:
+			index++;
+			break;
+		}
+	}
+
+	return 1;
+}
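+/*
+ * Sketch of the VPD layout parsed above (per the PCI VPD resource format):
+ *
+ *	0x82 <len_lo> <len_hi> <identifier string>	(skipped, as is 0x91)
+ *	0x90 <len_lo> <len_hi> <keyword fields>		(read-only VPD-R data)
+ *		each field: <key0> <key1> <len> <data ...>
+ *		keywords used: SN (serial number), V1 (model description),
+ *		V2 (model name), V3 (program type), V4 (port)
+ *	0x78						(end tag, stops parsing)
+ */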
+
+/**
+ * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+	lpfc_vpd_t *vp;
+	uint16_t dev_id = phba->pcidev->device;
+	int max_speed;
+	int GE = 0;
+	int oneConnect = 0; /* default is not a oneConnect */
+	struct {
+		char *name;
+		char *bus;
+		char *function;
+	} m = {"<Unknown>", "", ""};
+
+	if (mdp && mdp[0] != '\0'
+		&& descp && descp[0] != '\0')
+		return;
+
+	if (phba->lmt & LMT_32Gb)
+		max_speed = 32;
+	else if (phba->lmt & LMT_16Gb)
+		max_speed = 16;
+	else if (phba->lmt & LMT_10Gb)
+		max_speed = 10;
+	else if (phba->lmt & LMT_8Gb)
+		max_speed = 8;
+	else if (phba->lmt & LMT_4Gb)
+		max_speed = 4;
+	else if (phba->lmt & LMT_2Gb)
+		max_speed = 2;
+	else if (phba->lmt & LMT_1Gb)
+		max_speed = 1;
+	else
+		max_speed = 0;
+
+	vp = &phba->vpd;
+
+	switch (dev_id) {
+	case PCI_DEVICE_ID_FIREFLY:
+		m = (typeof(m)){"LP6000", "PCI",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SUPERFLY:
+		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
+			m = (typeof(m)){"LP7000", "PCI", ""};
+		else
+			m = (typeof(m)){"LP7000E", "PCI", ""};
+		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
+		break;
+	case PCI_DEVICE_ID_DRAGONFLY:
+		m = (typeof(m)){"LP8000", "PCI",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_CENTAUR:
+		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
+			m = (typeof(m)){"LP9002", "PCI", ""};
+		else
+			m = (typeof(m)){"LP9000", "PCI", ""};
+		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
+		break;
+	case PCI_DEVICE_ID_RFLY:
+		m = (typeof(m)){"LP952", "PCI",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PEGASUS:
+		m = (typeof(m)){"LP9802", "PCI-X",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_THOR:
+		m = (typeof(m)){"LP10000", "PCI-X",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_VIPER:
+		m = (typeof(m)){"LPX1000",  "PCI-X",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PFLY:
+		m = (typeof(m)){"LP982", "PCI-X",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_TFLY:
+		m = (typeof(m)){"LP1050", "PCI-X",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HELIOS:
+		m = (typeof(m)){"LP11000", "PCI-X2",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HELIOS_SCSP:
+		m = (typeof(m)){"LP11000-SP", "PCI-X2",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HELIOS_DCSP:
+		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_NEPTUNE:
+		m = (typeof(m)){"LPe1000", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_NEPTUNE_SCSP:
+		m = (typeof(m)){"LPe1000-SP", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_NEPTUNE_DCSP:
+		m = (typeof(m)){"LPe1002-SP", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_BMID:
+		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_BSMB:
+		m = (typeof(m)){"LP111", "PCI-X2",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZEPHYR:
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZEPHYR_SCSP:
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZEPHYR_DCSP:
+		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
+		GE = 1;
+		break;
+	case PCI_DEVICE_ID_ZMID:
+		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZSMB:
+		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LP101:
+		m = (typeof(m)){"LP101", "PCI-X",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LP10000S:
+		m = (typeof(m)){"LP10000-S", "PCI",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LP11000S:
+		m = (typeof(m)){"LP11000-S", "PCI-X2",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LPE11000S:
+		m = (typeof(m)){"LPe11000-S", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT:
+		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_MID:
+		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_SMB:
+		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_DCSP:
+		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_SCSP:
+		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_S:
+		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HORNET:
+		m = (typeof(m)){"LP21000", "PCIe",
+				"Obsolete, Unsupported FCoE Adapter"};
+		GE = 1;
+		break;
+	case PCI_DEVICE_ID_PROTEUS_VF:
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PROTEUS_PF:
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PROTEUS_S:
+		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_TIGERSHARK:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_TOMCAT:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_FALCON:
+		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+				"EmulexSecure Fibre"};
+		break;
+	case PCI_DEVICE_ID_BALIUS:
+		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FC:
+		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FC_VF:
+		m = (typeof(m)){"LPe16000", "PCIe",
+				"Obsolete, Unsupported Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FCOE:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FCOE_VF:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe15100", "PCIe",
+				"Obsolete, Unsupported FCoE"};
+		break;
+	case PCI_DEVICE_ID_LANCER_G6_FC:
+		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SKYHAWK:
+	case PCI_DEVICE_ID_SKYHAWK_VF:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
+		break;
+	default:
+		m = (typeof(m)){"Unknown", "", ""};
+		break;
+	}
+
+	if (mdp && mdp[0] == '\0')
+		snprintf(mdp, 79, "%s", m.name);
+	/*
+	 * oneConnect HBAs require special processing: they are all initiators,
+	 * and the port number is appended to the description.
+	 */
+	if (descp && descp[0] == '\0') {
+		if (oneConnect)
+			snprintf(descp, 255,
+				"Emulex OneConnect %s, %s Initiator %s",
+				m.name, m.function,
+				phba->Port);
+		else if (max_speed == 0)
+			snprintf(descp, 255,
+				"Emulex %s %s %s",
+				m.name, m.bus, m.function);
+		else
+			snprintf(descp, 255,
+				"Emulex %s %d%s %s %s",
+				m.name, max_speed, (GE) ? "GE" : "Gb",
+				m.bus, m.function);
+	}
+}
+
+/**
+ * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to an IOCB ring.
+ * @cnt: the number of IOCBs to be posted to the IOCB ring.
+ *
+ * This routine posts a given number of IOCBs with the associated DMA buffer
+ * descriptors specified by the cnt argument to the given IOCB ring.
+ *
+ * Return codes
+ *   The number of IOCBs NOT able to be posted to the IOCB ring.
+ **/
+int
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+{
+	IOCB_t *icmd;
+	struct lpfc_iocbq *iocb;
+	struct lpfc_dmabuf *mp1, *mp2;
+
+	cnt += pring->missbufcnt;
+
+	/* While there are buffers to post */
+	while (cnt > 0) {
+		/* Allocate buffer for  command iocb */
+		iocb = lpfc_sli_get_iocbq(phba);
+		if (iocb == NULL) {
+			pring->missbufcnt = cnt;
+			return cnt;
+		}
+		icmd = &iocb->iocb;
+
+		/* 2 buffers can be posted per command */
+		/* Allocate buffer to post */
+		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+		if (mp1)
+		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
+		if (!mp1 || !mp1->virt) {
+			kfree(mp1);
+			lpfc_sli_release_iocbq(phba, iocb);
+			pring->missbufcnt = cnt;
+			return cnt;
+		}
+
+		INIT_LIST_HEAD(&mp1->list);
+		/* Allocate buffer to post */
+		if (cnt > 1) {
+			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+			if (mp2)
+				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+							    &mp2->phys);
+			if (!mp2 || !mp2->virt) {
+				kfree(mp2);
+				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+				kfree(mp1);
+				lpfc_sli_release_iocbq(phba, iocb);
+				pring->missbufcnt = cnt;
+				return cnt;
+			}
+
+			INIT_LIST_HEAD(&mp2->list);
+		} else {
+			mp2 = NULL;
+		}
+
+		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
+		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
+		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
+		icmd->ulpBdeCount = 1;
+		cnt--;
+		if (mp2) {
+			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
+			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
+			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
+			cnt--;
+			icmd->ulpBdeCount = 2;
+		}
+
+		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+		icmd->ulpLe = 1;
+
+		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
+		    IOCB_ERROR) {
+			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+			kfree(mp1);
+			cnt++;
+			if (mp2) {
+				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
+				kfree(mp2);
+				cnt++;
+			}
+			lpfc_sli_release_iocbq(phba, iocb);
+			pring->missbufcnt = cnt;
+			return cnt;
+		}
+		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
+		if (mp2)
+			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
+	}
+	pring->missbufcnt = 0;
+	return 0;
+}
+
+/**
+ * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine posts initial receive IOCB buffers to the ELS ring. The
+ * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
+ * set to 64 IOCBs. SLI3 only.
+ *
+ * Return codes
+ *   0 - success (currently always success)
+ **/
+static int
+lpfc_post_rcv_buf(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* Ring 0, ELS / CT buffers */
+	lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+	/* Ring 2 - FCP no buffers needed */
+
+	return 0;
+}
+
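+/* S(N, V) rotates the 32-bit value V left by N bits (SHA-1 circular shift) */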
+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
+
+/**
+ * lpfc_sha_init - Set up initial array of hash table entries
+ * @HashResultPointer: pointer to an array as hash table.
+ *
+ * This routine sets up the initial values to the array of hash table entries
+ * for the LC HBAs.
+ **/
+static void
+lpfc_sha_init(uint32_t * HashResultPointer)
+{
+	HashResultPointer[0] = 0x67452301;
+	HashResultPointer[1] = 0xEFCDAB89;
+	HashResultPointer[2] = 0x98BADCFE;
+	HashResultPointer[3] = 0x10325476;
+	HashResultPointer[4] = 0xC3D2E1F0;
+}
+
+/**
+ * lpfc_sha_iterate - Iterate initial hash table with the working hash table
+ * @HashResultPointer: pointer to an initial/result hash table.
+ * @HashWorkingPointer: pointer to a working hash table.
+ *
+ * This routine iterates an initial hash table pointed to by
+ * @HashResultPointer with the values from the working hash table pointed to
+ * by @HashWorkingPointer. The results are put back into the initial hash
+ * table, which is returned through @HashResultPointer as the result table.
+ **/
+static void
+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
+{
+	int t;
+	uint32_t TEMP;
+	uint32_t A, B, C, D, E;
+	t = 16;
+	do {
+		HashWorkingPointer[t] =
+		    S(1, HashWorkingPointer[t - 3] ^
+			 HashWorkingPointer[t - 8] ^
+			 HashWorkingPointer[t - 14] ^
+			 HashWorkingPointer[t - 16]);
+	} while (++t <= 79);
+	t = 0;
+	A = HashResultPointer[0];
+	B = HashResultPointer[1];
+	C = HashResultPointer[2];
+	D = HashResultPointer[3];
+	E = HashResultPointer[4];
+
+	do {
+		if (t < 20) {
+			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
+		} else if (t < 40) {
+			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
+		} else if (t < 60) {
+			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
+		} else {
+			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
+		}
+		TEMP += S(5, A) + E + HashWorkingPointer[t];
+		E = D;
+		D = C;
+		C = S(30, B);
+		B = A;
+		A = TEMP;
+	} while (++t <= 79);
+
+	HashResultPointer[0] += A;
+	HashResultPointer[1] += B;
+	HashResultPointer[2] += C;
+	HashResultPointer[3] += D;
+	HashResultPointer[4] += E;
+
+}
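+/*
+ * The two routines above are a self-contained implementation of the SHA-1
+ * compression function: lpfc_sha_init() loads the standard initial hash
+ * values H0-H4 and lpfc_sha_iterate() runs the 80-round message schedule
+ * with the standard round constants (0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC,
+ * 0xCA62C1D6). They operate on raw 32-bit words rather than a byte stream,
+ * so there is no padding/length step as in a general-purpose SHA-1.
+ */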
+
+/**
+ * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
+ * @RandomChallenge: pointer to an entry of the host challenge random number array.
+ * @HashWorking: pointer to the corresponding entry of the working hash array.
+ *
+ * This routine XORs the challenge random number associated with the host,
+ * referred to by @RandomChallenge, into the working hash entry referred to
+ * by @HashWorking. The result is returned by reference through @HashWorking.
+ **/
+static void
+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
+{
+	*HashWorking = (*RandomChallenge ^ *HashWorking);
+}
+
+/**
+ * lpfc_hba_init - Perform special handling for LC HBA initialization
+ * @phba: pointer to lpfc hba data structure.
+ * @hbainit: pointer to an array of unsigned 32-bit integers.
+ *
+ * This routine performs the special handling for LC HBA initialization.
+ **/
+void
+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
+{
+	int t;
+	uint32_t *HashWorking;
+	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
+
+	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
+	if (!HashWorking)
+		return;
+
+	HashWorking[0] = HashWorking[78] = *pwwnn++;
+	HashWorking[1] = HashWorking[79] = *pwwnn;
+
+	for (t = 0; t < 7; t++)
+		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
+
+	lpfc_sha_init(hbainit);
+	lpfc_sha_iterate(hbainit, HashWorking);
+	kfree(HashWorking);
+}
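+/*
+ * Flow of lpfc_hba_init() (sketch): the two WWNN words seed HashWorking
+ * entries [0]/[78] and [1]/[79], the host challenge random data is XORed
+ * into entries 0-6, and one SHA-1 init/iterate pass over the working array
+ * produces the five hbainit words used for LC HBA initialization.
+ */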
+
+/**
+ * lpfc_cleanup - Performs vport cleanups before deleting a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs the necessary cleanups before deleting the @vport.
+ * It invokes the discovery state machine to perform necessary state
+ * transitions and to release the ndlps associated with the @vport. Note,
+ * the physical port is treated as @vport 0.
+ **/
+void
+lpfc_cleanup(struct lpfc_vport *vport)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int i = 0;
+
+	if (phba->link_state > LPFC_LINK_DOWN)
+		lpfc_port_link_failure(vport);
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp)) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+			if (!ndlp)
+				continue;
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+			/* Trigger the release of the ndlp memory */
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+		spin_lock_irq(&phba->ndlp_lock);
+		if (NLP_CHK_FREE_REQ(ndlp)) {
+			/* The ndlp should not be in memory free mode already */
+			spin_unlock_irq(&phba->ndlp_lock);
+			continue;
+		} else
+			/* Indicate request for freeing ndlp memory */
+			NLP_SET_FREE_REQ(ndlp);
+		spin_unlock_irq(&phba->ndlp_lock);
+
+		if (vport->port_type != LPFC_PHYSICAL_PORT &&
+		    ndlp->nlp_DID == Fabric_DID) {
+			/* Just free up ndlp with Fabric_DID for vports */
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+
+		/* take care of nodes in the unused state before the state
+		 * machine takes action.
+		 */
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+
+		if (ndlp->nlp_type & NLP_FABRIC)
+			lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					     NLP_EVT_DEVICE_RM);
+	}
+
+	/* At this point, ALL ndlp's should be gone
+	 * because of the previous NLP_EVT_DEVICE_RM.
+	 * Let's wait for this to happen, if needed
+	 * (up to 3000 iterations of 10 ms, roughly 30 seconds).
+	 */
+	while (!list_empty(&vport->fc_nodes)) {
+		if (i++ > 3000) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				"0233 Nodelist not empty\n");
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						&vport->fc_nodes, nlp_listp) {
+				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+						LOG_NODE,
+						"0282 did:x%x ndlp:x%p "
+						"usgmap:x%x refcnt:%d\n",
+						ndlp->nlp_DID, (void *)ndlp,
+						ndlp->nlp_usg_map,
+						kref_read(&ndlp->kref));
+			}
+			break;
+		}
+
+		/* Wait for any activity on ndlps to settle */
+		msleep(10);
+	}
+	lpfc_cleanup_vports_rrqs(vport, NULL);
+}
+
+/**
+ * lpfc_stop_vport_timers - Stop all the timers associated with a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine stops all the timers associated with a @vport. This function
+ * is invoked before disabling or deleting a @vport. Note that the physical
+ * port is treated as @vport 0.
+ **/
+void
+lpfc_stop_vport_timers(struct lpfc_vport *vport)
+{
+	del_timer_sync(&vport->els_tmofunc);
+	del_timer_sync(&vport->delayed_disc_tmo);
+	lpfc_can_disctmo(vport);
+	return;
+}
+
+/**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
+ * caller of this routine should already hold the host lock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	/* Clear pending FCF rediscovery wait flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
+	/* Now, try to stop the timer */
+	del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
+ * checks whether the FCF rediscovery wait timer is pending with the host
+ * lock held before proceeding with disabling the timer and clearing the
+ * wait timer pending flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		/* FCF rediscovery timer already fired or stopped */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+	/* Clear failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops all the timers associated with a HBA. This function is
+ * invoked before either putting a HBA offline or unloading the driver.
+ **/
+void
+lpfc_stop_hba_timers(struct lpfc_hba *phba)
+{
+	lpfc_stop_vport_timers(phba->pport);
+	del_timer_sync(&phba->sli.mbox_tmo);
+	del_timer_sync(&phba->fabric_block_timer);
+	del_timer_sync(&phba->eratt_poll);
+	del_timer_sync(&phba->hb_tmofunc);
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		del_timer_sync(&phba->rrq_tmr);
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	}
+	phba->hb_outstanding = 0;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		/* Stop any LightPulse device specific driver timers */
+		del_timer_sync(&phba->fcp_poll_timer);
+		break;
+	case LPFC_PCI_DEV_OC:
+		/* Stop any OneConnect device specific driver timers */
+		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0297 Invalid device group (x%x)\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
+ * @phba: pointer to lpfc hba data structure.
+ * @mbx_action: LPFC_MBX_WAIT to wait for any active mailbox command to
+ *              complete, or LPFC_MBX_NO_WAIT to return immediately.
+ *
+ * This routine marks a HBA's management interface as blocked. Once the HBA's
+ * management interface is marked as blocked, all user space access to the
+ * HBA, whether through the sysfs interface or the libdfc interface, is
+ * blocked. The management interface is blocked while the driver prepares the
+ * HBA interface for online or offline operation.
+ **/
+static void
+lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
+{
+	unsigned long iflag;
+	uint8_t actcmd = MBX_HEARTBEAT;
+	unsigned long timeout;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (mbx_action == LPFC_MBX_NO_WAIT)
+		return;
+	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	if (phba->sli.mbox_active) {
+		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
+		/* Determine how long we might wait for the active mailbox
+		 * command to be gracefully completed by firmware.
+		 */
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+				phba->sli.mbox_active) * 1000) + jiffies;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* Wait for the outstanding mailbox command to complete */
+	while (phba->sli.mbox_active) {
+		/* Check active mailbox complete status every 2ms */
+		msleep(2);
+		if (time_after(jiffies, timeout)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2813 Mgmt IO is Blocked %x "
+				"- mbox cmd %x still active\n",
+				phba->sli.sli_flag, actcmd);
+			break;
+		}
+	}
+}
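+/*
+ * Typical pairing, as in lpfc_online() below (sketch):
+ *
+ *	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
+ *	... bring the port online or prepare it for offline ...
+ *	lpfc_unblock_mgmt_io(phba);
+ */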
+
+/**
+ * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Allocate RPIs for all active remote nodes. This is needed whenever
+ * an SLI4 adapter is reset and the driver is not unloading. Its purpose
+ * is to fix up the temporary rpi assignments.
+ **/
+void
+lpfc_sli4_node_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist  *ndlp, *next_ndlp;
+	struct lpfc_vport **vports;
+	int i, rpi;
+	unsigned long flags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports == NULL)
+		return;
+
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		if (vports[i]->load_flag & FC_UNLOADING)
+			continue;
+
+		list_for_each_entry_safe(ndlp, next_ndlp,
+					 &vports[i]->fc_nodes,
+					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			rpi = lpfc_sli4_alloc_rpi(phba);
+			if (rpi == LPFC_RPI_ALLOC_ERROR) {
+				spin_lock_irqsave(&phba->ndlp_lock, flags);
+				NLP_CLR_NODE_ACT(ndlp);
+				spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+				continue;
+			}
+			ndlp->nlp_rpi = rpi;
+			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+					 "0009 rpi:%x DID:%x "
+					 "flg:%x map:%x %p\n", ndlp->nlp_rpi,
+					 ndlp->nlp_DID, ndlp->nlp_flag,
+					 ndlp->nlp_usg_map, ndlp);
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_online - Initialize and bring a HBA online
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the HBA and brings a HBA online. During this
+ * process, the management interface is blocked to prevent user space access
+ * to the HBA from interfering with the driver initialization.
+ *
+ * Return codes
+ *   0 - successful
+ *   1 - failed
+ **/
+int
+lpfc_online(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_vport **vports;
+	int i, error = 0;
+	bool vpis_cleared = false;
+
+	if (!phba)
+		return 0;
+	vport = phba->pport;
+
+	if (!(vport->fc_flag & FC_OFFLINE_MODE))
+		return 0;
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"0458 Bring Adapter online\n");
+
+	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return 1;
+		}
+		spin_lock_irq(&phba->hbalock);
+		if (!phba->sli4_hba.max_cfg_param.vpi_used)
+			vpis_cleared = true;
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Reestablish the local initiator port.
+		 * The offline process destroyed the previous lport.
+		 */
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
+				!phba->nvmet_support) {
+			error = lpfc_nvme_create_localport(phba->pport);
+			if (error)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6132 NVME restore reg failed "
+					"on nvmei error x%x\n", error);
+		}
+	} else {
+		lpfc_sli_queue_init(phba);
+		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return 1;
+		}
+	}
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			struct Scsi_Host *shost;
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			if (phba->sli_rev == LPFC_SLI_REV4) {
+				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+				if ((vpis_cleared) &&
+				    (vports[i]->port_type !=
+					LPFC_PHYSICAL_PORT))
+					vports[i]->vpi = 0;
+			}
+			spin_unlock_irq(shost->host_lock);
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	lpfc_unblock_mgmt_io(phba);
+	return 0;
+}
+
+/**
+ * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as not blocked. Once the
+ * HBA's management interface is marked as not blocked, all user space access
+ * to the HBA, whether through the sysfs interface or the libdfc interface,
+ * is allowed again. The management interface is blocked while the driver
+ * prepares the HBA interface for online or offline operation and unblocked
+ * afterwards.
+ **/
+void
+lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
+{
+	unsigned long iflag;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_offline_prep - Prepare a HBA to be brought offline
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to prepare a HBA to be brought offline. It performs
+ * unregistration login to all the nodes on all vports and flushes the mailbox
+ * queue to make it ready to be brought offline.
+ **/
+void
+lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_nodelist  *ndlp, *next_ndlp;
+	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
+	int i;
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		return;
+
+	lpfc_block_mgmt_io(phba, mbx_action);
+
+	lpfc_linkdown(phba);
+
+	/* Issue an unreg_login to all nodes on all vports */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->load_flag & FC_UNLOADING)
+				continue;
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+
+			shost =	lpfc_shost_from_vport(vports[i]);
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						 &vports[i]->fc_nodes,
+						 nlp_listp) {
+				if (!NLP_CHK_NODE_ACT(ndlp))
+					continue;
+				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+					continue;
+				if (ndlp->nlp_type & NLP_FABRIC) {
+					lpfc_disc_state_machine(vports[i], ndlp,
+						NULL, NLP_EVT_DEVICE_RECOVERY);
+					lpfc_disc_state_machine(vports[i], ndlp,
+						NULL, NLP_EVT_DEVICE_RM);
+				}
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+				spin_unlock_irq(shost->host_lock);
+				/*
+				 * Whenever an SLI4 port goes offline, free the
+				 * RPI. Get a new RPI when the adapter port
+				 * comes back online.
+				 */
+				if (phba->sli_rev == LPFC_SLI_REV4) {
+					lpfc_printf_vlog(ndlp->vport,
+							 KERN_INFO, LOG_NODE,
+							 "0011 lpfc_offline: "
+							 "ndlp:x%p did %x "
+							 "usgmap:x%x rpi:%x\n",
+							 ndlp, ndlp->nlp_DID,
+							 ndlp->nlp_usg_map,
+							 ndlp->nlp_rpi);
+
+					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+				}
+				lpfc_unreg_rpi(vports[i], ndlp);
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
+}
+
+/**
+ * lpfc_offline - Bring a HBA offline
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine actually brings a HBA offline. It stops all the timers
+ * associated with the HBA, brings down the SLI layer, and eventually
+ * marks the HBA as in offline state for the upper layer protocol.
+ **/
+void
+lpfc_offline(struct lpfc_hba *phba)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_vport **vports;
+	int i;
+
+	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+		return;
+
+	/* stop port and all timers associated with this hba */
+	lpfc_stop_port(phba);
+
+	/* Tear down the local and target port registrations.  The
+	 * nvme transports need to clean up.
+	 */
+	lpfc_nvmet_destroy_targetport(phba);
+	lpfc_nvme_destroy_localport(phba->pport);
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_stop_vport_timers(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"0460 Bring Adapter offline\n");
+	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
+	lpfc_sli_hba_down(phba);
+	spin_lock_irq(&phba->hbalock);
+	phba->work_ha = 0;
+	spin_unlock_irq(&phba->hbalock);
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->work_port_events = 0;
+			vports[i]->fc_flag |= FC_OFFLINE_MODE;
+			spin_unlock_irq(shost->host_lock);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the SCSI buffers and IOCBs from the driver
+ * list back to the kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ **/
+static void
+lpfc_scsi_free(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *sb, *sb_next;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+		return;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Release all the lpfc_scsi_bufs maintained by this host. */
+
+	spin_lock(&phba->scsi_buf_list_put_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+				 list) {
+		list_del(&sb->list);
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+			      sb->dma_handle);
+		kfree(sb);
+		phba->total_scsi_bufs--;
+	}
+	spin_unlock(&phba->scsi_buf_list_put_lock);
+
+	spin_lock(&phba->scsi_buf_list_get_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+				 list) {
+		list_del(&sb->list);
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
+			      sb->dma_handle);
+		kfree(sb);
+		phba->total_scsi_bufs--;
+	}
+	spin_unlock(&phba->scsi_buf_list_get_lock);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the NVME buffers and IOCBs from the driver
+ * list back to the kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ **/
+static void
+lpfc_nvme_free(struct lpfc_hba *phba)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+		return;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Release all the lpfc_nvme_bufs maintained by this host. */
+	spin_lock(&phba->nvme_buf_list_put_lock);
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &phba->lpfc_nvme_buf_list_put, list) {
+		list_del(&lpfc_ncmd->list);
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+			      lpfc_ncmd->dma_handle);
+		kfree(lpfc_ncmd);
+		phba->total_nvme_bufs--;
+	}
+	spin_unlock(&phba->nvme_buf_list_put_lock);
+
+	spin_lock(&phba->nvme_buf_list_get_lock);
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &phba->lpfc_nvme_buf_list_get, list) {
+		list_del(&lpfc_ncmd->list);
+		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+			      lpfc_ncmd->dma_handle);
+		kfree(lpfc_ncmd);
+		phba->total_nvme_bufs--;
+	}
+	spin_unlock(&phba->nvme_buf_list_get_lock);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the size of the current els xri-sgl list,
+ * and then goes through all the sgls to update the physical XRIs assigned
+ * due to a port function reset. During port initialization, the current
+ * els sgl list is empty.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+	uint16_t i, lxri, xri_cnt, els_xri_cnt;
+	LIST_HEAD(els_sgl_list);
+	int rc;
+
+	/*
+	 * update on pci function's els xri-sgl list
+	 */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
+		/* els xri-sgl expanded */
+		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3157 ELS xri-sgl count increased from "
+				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
+				els_xri_cnt);
+		/* allocate the additional els sgls */
+		for (i = 0; i < xri_cnt; i++) {
+			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+					     GFP_KERNEL);
+			if (sglq_entry == NULL) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"2562 Failure to allocate an "
+						"ELS sgl entry:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->buff_type = GEN_BUFF_TYPE;
+			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
+							   &sglq_entry->phys);
+			if (sglq_entry->virt == NULL) {
+				kfree(sglq_entry);
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"2563 Failure to allocate an "
+						"ELS mbuf:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->sgl = sglq_entry->virt;
+			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+			sglq_entry->state = SGL_FREED;
+			list_add_tail(&sglq_entry->list, &els_sgl_list);
+		}
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&els_sgl_list,
+				 &phba->sli4_hba.lpfc_els_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
+		/* els xri-sgl shrunk */
+		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3158 ELS xri-sgl count decreased from "
+				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
+				els_xri_cnt);
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
+				 &els_sgl_list);
+		/* release extra els sgls from list */
+		for (i = 0; i < xri_cnt; i++) {
+			list_remove_head(&els_sgl_list,
+					 sglq_entry, struct lpfc_sglq, list);
+			if (sglq_entry) {
+				__lpfc_mbuf_free(phba, sglq_entry->virt,
+						 sglq_entry->phys);
+				kfree(sglq_entry);
+			}
+		}
+		list_splice_init(&els_sgl_list,
+				 &phba->sli4_hba.lpfc_els_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3163 ELS xri-sgl count unchanged: %d\n",
+				els_xri_cnt);
+	phba->sli4_hba.els_xri_cnt = els_xri_cnt;
+
+	/* update xris to els sgls on the list */
+	sglq_entry = NULL;
+	sglq_entry_next = NULL;
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &phba->sli4_hba.lpfc_els_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2400 Failed to allocate xri for "
+					"ELS sgl\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		sglq_entry->sli4_lxritag = lxri;
+		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+	return 0;
+
+out_free_mem:
+	lpfc_free_els_sgl_list(phba);
+	return rc;
+}
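+/*
+ * The same grow/shrink/remap pattern is repeated by the NVMET, SCSI, and
+ * NVME sgl update routines below: compute the new xri budget, allocate or
+ * free sgl entries to match it, then walk the list assigning fresh xri tags.
+ */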
+
+/**
+ * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the size of the current nvmet xri-sgl list,
+ * and then goes through all the sgls to update the physical XRIs assigned
+ * due to a port function reset. During port initialization, the current
+ * nvmet sgl list is empty.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+	uint16_t i, lxri, xri_cnt, els_xri_cnt;
+	uint16_t nvmet_xri_cnt;
+	LIST_HEAD(nvmet_sgl_list);
+	int rc;
+
+	/*
+	 * update on pci function's nvmet xri-sgl list
+	 */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
+		/* nvmet xri-sgl expanded */
+		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
+				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
+		/* allocate the additional nvmet sgls */
+		for (i = 0; i < xri_cnt; i++) {
+			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+					     GFP_KERNEL);
+			if (sglq_entry == NULL) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"6303 Failure to allocate an "
+						"NVMET sgl entry:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->buff_type = NVMET_BUFF_TYPE;
+			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
+							   &sglq_entry->phys);
+			if (sglq_entry->virt == NULL) {
+				kfree(sglq_entry);
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"6304 Failure to allocate an "
+						"NVMET buf:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->sgl = sglq_entry->virt;
+			memset(sglq_entry->sgl, 0,
+			       phba->cfg_sg_dma_buf_size);
+			sglq_entry->state = SGL_FREED;
+			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
+		}
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&nvmet_sgl_list,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
+		/* nvmet xri-sgl shrunk */
+		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"6305 NVMET xri-sgl count decreased from "
+				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
+				nvmet_xri_cnt);
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
+				 &nvmet_sgl_list);
+		/* release extra nvmet sgls from list */
+		for (i = 0; i < xri_cnt; i++) {
+			list_remove_head(&nvmet_sgl_list,
+					 sglq_entry, struct lpfc_sglq, list);
+			if (sglq_entry) {
+				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
+						    sglq_entry->phys);
+				kfree(sglq_entry);
+			}
+		}
+		list_splice_init(&nvmet_sgl_list,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"6306 NVMET xri-sgl count unchanged: %d\n",
+				nvmet_xri_cnt);
+	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
+
+	/* update xris to nvmet sgls on the list */
+	sglq_entry = NULL;
+	sglq_entry_next = NULL;
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6307 Failed to allocate xri for "
+					"NVMET sgl\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		sglq_entry->sli4_lxritag = lxri;
+		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+	return 0;
+
+out_free_mem:
+	lpfc_free_nvmet_sgl_list(phba);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi xri-sgl lists, and then goes through all the sgls to update the
+ * physical XRIs assigned due to a port function reset. During port
+ * initialization, the current els and allocated scsi sgl lists are empty.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb, *psb_next;
+	uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
+	LIST_HEAD(scsi_sgl_list);
+	int rc;
+
+	/*
+	 * update on pci function's els xri-sgl list
+	 */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+	phba->total_scsi_bufs = 0;
+
+	/*
+	 * update on pci function's allocated scsi xri-sgl list
+	 */
+	/* maximum number of xris available for scsi buffers */
+	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
+				      els_xri_cnt;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+		return 0;
+
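+	/*
+	 * cfg_xri_split is the percentage of the remaining (non-ELS) XRIs
+	 * reserved for SCSI buffers when NVME is also enabled; the NVME
+	 * sgl update below takes whatever is left.
+	 */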
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		phba->sli4_hba.scsi_xri_max =  /* Split them up */
+			(phba->sli4_hba.scsi_xri_max *
+			 phba->cfg_xri_split) / 100;
+
+	spin_lock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"6060 Current allocated SCSI xri-sgl count:%d, "
+			"maximum  SCSI xri count:%d (split:%d)\n",
+			phba->sli4_hba.scsi_xri_cnt,
+			phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
+
+	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
+		/* max scsi xri shrunk below the allocated scsi buffers */
+		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
+					phba->sli4_hba.scsi_xri_max;
+		/* release the extra allocated scsi buffers */
+		for (i = 0; i < scsi_xri_cnt; i++) {
+			list_remove_head(&scsi_sgl_list, psb,
+					 struct lpfc_scsi_buf, list);
+			if (psb) {
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+					      psb->data, psb->dma_handle);
+				kfree(psb);
+			}
+		}
+		spin_lock_irq(&phba->scsi_buf_list_get_lock);
+		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
+		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	}
+
+	/* update xris associated to remaining allocated scsi buffers */
+	psb = NULL;
+	psb_next = NULL;
+	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2560 Failed to allocate xri for "
+					"scsi buffer\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		psb->cur_iocbq.sli4_lxritag = lxri;
+		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+	spin_lock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
+	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	return 0;
+
+out_free_mem:
+	lpfc_scsi_free(phba);
+	return rc;
+}
+
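+/**
+ * lpfc_get_wwpn - Read the WWPN of the HBA from non-volatile parameters
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Issues a READ_NV mailbox command by polling and extracts the port name
+ * from the completion, converting it to CPU byte order.
+ *
+ * Return codes
+ *   WWPN of the HBA - success
+ *   (uint64_t)-1 - mailbox allocation or command failure
+ **/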
+static uint64_t
+lpfc_get_wwpn(struct lpfc_hba *phba)
+{
+	uint64_t wwn;
+	int rc;
+	LPFC_MBOXQ_t *mboxq;
+	MAILBOX_t *mb;
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						GFP_KERNEL);
+	if (!mboxq)
+		return (uint64_t)-1;
+
+	/* First get WWN of HBA instance */
+	lpfc_read_nv(phba, mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"6019 Mailbox failed , mbxCmd x%x "
+				"READ_NV, mbxStatus x%x\n",
+				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return (uint64_t) -1;
+	}
+	mb = &mboxq->u.mb;
+	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
+	/* wwn is WWPN of HBA instance */
+	mempool_free(mboxq, phba->mbox_mem_pool);
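+	/*
+	 * On SLI4 the port name is byte-swapped from big-endian; on SLI3 the
+	 * two 32-bit halves are presumably stored in the opposite order,
+	 * since rotating by 32 bits simply swaps them back into place.
+	 */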
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return be64_to_cpu(wwn);
+	else
+		return rol64(wwn, 32);
+}
+
+/**
+ * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the size of the current nvme xri-sgl list,
+ * and then goes through all the sgls to update the physical XRIs assigned
+ * due to a port function reset. During port initialization, the current
+ * nvme sgl list is empty.
+ *
+ * Return codes
+ *   0 - successful
+ *   -ENOMEM - failed to assign an XRI to each remaining buffer
+ **/
+int
+lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+	uint16_t i, lxri, els_xri_cnt;
+	uint16_t nvme_xri_cnt, nvme_xri_max;
+	LIST_HEAD(nvme_sgl_list);
+	int rc;
+
+	phba->total_nvme_bufs = 0;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+		return 0;
+	/*
+	 * update on pci function's allocated nvme xri-sgl list
+	 */
+
+	/* maximum number of xris available for nvme buffers */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+	nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	phba->sli4_hba.nvme_xri_max = nvme_xri_max;
+	phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"6074 Current allocated NVME xri-sgl count:%d, "
+			"maximum NVME xri count:%d\n",
+			phba->sli4_hba.nvme_xri_cnt,
+			phba->sli4_hba.nvme_xri_max);
+
+	spin_lock_irq(&phba->nvme_buf_list_get_lock);
+	spin_lock(&phba->nvme_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
+	list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+	spin_unlock(&phba->nvme_buf_list_put_lock);
+	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+
+	if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
+		/* max nvme xri shrunk below the allocated nvme buffers */
+		spin_lock_irq(&phba->nvme_buf_list_get_lock);
+		nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
+					phba->sli4_hba.nvme_xri_max;
+		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+		/* release the extra allocated nvme buffers */
+		for (i = 0; i < nvme_xri_cnt; i++) {
+			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
+					 struct lpfc_nvme_buf, list);
+			if (lpfc_ncmd) {
+				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+					      lpfc_ncmd->data,
+					      lpfc_ncmd->dma_handle);
+				kfree(lpfc_ncmd);
+			}
+		}
+		spin_lock_irq(&phba->nvme_buf_list_get_lock);
+		phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
+		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+	}
+
+	/* update xris associated to remaining allocated nvme buffers */
+	lpfc_ncmd = NULL;
+	lpfc_ncmd_next = NULL;
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &nvme_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6075 Failed to allocate xri for "
+					"nvme buffer\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+	spin_lock_irq(&phba->nvme_buf_list_get_lock);
+	spin_lock(&phba->nvme_buf_list_put_lock);
+	list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+	INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+	spin_unlock(&phba->nvme_buf_list_put_lock);
+	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+	return 0;
+
+out_free_mem:
+	lpfc_nvme_free(phba);
+	return rc;
+}
+
+/**
+ * lpfc_create_port - Create an FC port
+ * @phba: pointer to lpfc hba data structure.
+ * @instance: a unique integer ID to this FC port.
+ * @dev: pointer to the device data structure.
+ *
+ * This routine creates an FC port for the upper layer protocol. The FC port
+ * can be created on top of either a physical port or a virtual port provided
+ * by the HBA. This routine also allocates a SCSI host data structure (shost)
+ * and associates it with the FC port before adding the shost into the SCSI
+ * layer.
+ *
+ * Return codes
+ *   @vport - pointer to the virtual N_Port data structure.
+ *   NULL - port create failed.
+ **/
+struct lpfc_vport *
+lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *shost = NULL;
+	int error = 0;
+	int i;
+	uint64_t wwn;
+	bool use_no_reset_hba = false;
+	int rc;
+
+	if (lpfc_no_hba_reset_cnt) {
+		if (phba->sli_rev < LPFC_SLI_REV4 &&
+		    dev == &phba->pcidev->dev) {
+			/* Reset the port first */
+			lpfc_sli_brdrestart(phba);
+			rc = lpfc_sli_chipset_init(phba);
+			if (rc)
+				return NULL;
+		}
+		wwn = lpfc_get_wwpn(phba);
+	}
+
+	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
+		if (wwn == lpfc_no_hba_reset[i]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6020 Setting use_no_reset port=%llx\n",
+					wwn);
+			use_no_reset_hba = true;
+			break;
+		}
+	}
+
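+	/*
+	 * Select the SCSI host template: vports use the NPIV template; the
+	 * physical port uses the reset-capable template unless its WWPN is
+	 * on the no-hba-reset list; NVME-only configs use the NVME template.
+	 */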
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		if (dev != &phba->pcidev->dev) {
+			shost = scsi_host_alloc(&lpfc_vport_template,
+						sizeof(struct lpfc_vport));
+		} else {
+			if (!use_no_reset_hba)
+				shost = scsi_host_alloc(&lpfc_template,
+						sizeof(struct lpfc_vport));
+			else
+				shost = scsi_host_alloc(&lpfc_template_no_hr,
+						sizeof(struct lpfc_vport));
+		}
+	} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		shost = scsi_host_alloc(&lpfc_template_nvme,
+					sizeof(struct lpfc_vport));
+	}
+	if (!shost)
+		goto out;
+
+	vport = (struct lpfc_vport *) shost->hostdata;
+	vport->phba = phba;
+	vport->load_flag |= FC_LOADING;
+	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+	vport->fc_rscn_flush = 0;
+	lpfc_get_vport_cfgparam(vport);
+
+	shost->unique_id = instance;
+	shost->max_id = LPFC_MAX_TARGET;
+	shost->max_lun = vport->cfg_max_luns;
+	shost->this_id = -1;
+	shost->max_cmd_len = 16;
+	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		shost->dma_boundary =
+			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
+		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+	}
+
+	/*
+	 * Set initial can_queue value since 0 is no longer supported and
+	 * scsi_add_host will fail. This will be adjusted later based on the
+	 * max xri value determined in hba setup.
+	 */
+	shost->can_queue = phba->cfg_hba_queue_depth - 10;
+	if (dev != &phba->pcidev->dev) {
+		shost->transportt = lpfc_vport_transport_template;
+		vport->port_type = LPFC_NPIV_PORT;
+	} else {
+		shost->transportt = lpfc_transport_template;
+		vport->port_type = LPFC_PHYSICAL_PORT;
+	}
+
+	/* Initialize all internally managed lists. */
+	INIT_LIST_HEAD(&vport->fc_nodes);
+	INIT_LIST_HEAD(&vport->rcv_buffer_list);
+	spin_lock_init(&vport->work_port_lock);
+
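+	/* Per-vport discovery, ELS, and delayed-discovery timers */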
+	setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
+			(unsigned long)vport);
+
+	setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
+			(unsigned long)vport);
+
+	setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
+			(unsigned long)vport);
+
+	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
+	if (error)
+		goto out_put_shost;
+
+	spin_lock_irq(&phba->hbalock);
+	list_add_tail(&vport->listentry, &phba->port_list);
+	spin_unlock_irq(&phba->hbalock);
+	return vport;
+
+out_put_shost:
+	scsi_host_put(shost);
+out:
+	return NULL;
+}
+
+/**
+ * destroy_port -  destroy an FC port
+ * @vport: pointer to an lpfc virtual N_Port data structure.
+ *
+ * This routine destroys an FC port from the upper layer protocol. All the
+ * resources associated with the port are released.
+ **/
+void
+destroy_port(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	lpfc_debugfs_terminate(vport);
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_cleanup(vport);
+	return;
+}
+
+/**
+ * lpfc_get_instance - Get a unique integer ID
+ *
+ * This routine allocates a unique integer ID from lpfc_hba_index pool. It
+ * uses the kernel idr facility to perform the task.
+ *
+ * Return codes:
+ *   instance - a unique integer ID allocated as the new instance.
+ *   -1 - lpfc get instance failed.
+ **/
+int
+lpfc_get_instance(void)
+{
+	int ret;
+
+	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
+	return ret < 0 ? -1 : ret;
+}
+
+/**
+ * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
+ * @shost: pointer to SCSI host data structure.
+ * @time: elapsed time of the scan in jiffies.
+ *
+ * This routine is called by the SCSI layer with a SCSI host to determine
+ * whether the host scan is finished.
+ *
+ * Note: there is no scan_start function as adapter initialization will have
+ * asynchronously kicked off the link initialization.
+ *
+ * Return codes
+ *   0 - SCSI host scan is not over yet.
+ *   1 - SCSI host scan is over.
+ **/
+int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int stat = 0;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (vport->load_flag & FC_UNLOADING) {
+		stat = 1;
+		goto finished;
+	}
+	if (time >= msecs_to_jiffies(30 * 1000)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0461 Scanning longer than 30 "
+				"seconds.  Continuing initialization\n");
+		stat = 1;
+		goto finished;
+	}
+	if (time >= msecs_to_jiffies(15 * 1000) &&
+	    phba->link_state <= LPFC_LINK_DOWN) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0465 Link down longer than 15 "
+				"seconds.  Continuing initialization\n");
+		stat = 1;
+		goto finished;
+	}
+
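+	/*
+	 * Only declare the scan finished once the port is ready, discovery
+	 * has quiesced, and no mailbox command is still active.
+	 */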
+	if (vport->port_state != LPFC_VPORT_READY)
+		goto finished;
+	if (vport->num_disc_nodes || vport->fc_prli_sent)
+		goto finished;
+	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
+		goto finished;
+	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
+		goto finished;
+
+	stat = 1;
+
+finished:
+	spin_unlock_irq(shost->host_lock);
+	return stat;
+}
+
+/**
+ * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
+ * @shost: pointer to SCSI host data structure.
+ *
+ * This routine initializes a given SCSI host attributes on a FC port. The
+ * SCSI host can be either on top of a physical port or a virtual port.
+ **/
+void lpfc_host_attrib_init(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	/*
+	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
+	 */
+
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+	fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
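+	/* FC-4 types bitmask: advertise FCP (type 0x08) and CT (type 0x20) */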
+	memset(fc_host_supported_fc4s(shost), 0,
+	       sizeof(fc_host_supported_fc4s(shost)));
+	fc_host_supported_fc4s(shost)[2] = 1;
+	fc_host_supported_fc4s(shost)[7] = 1;
+
+	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+				 sizeof fc_host_symbolic_name(shost));
+
+	fc_host_supported_speeds(shost) = 0;
+	if (phba->lmt & LMT_32Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
+	if (phba->lmt & LMT_16Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
+	if (phba->lmt & LMT_10Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+	if (phba->lmt & LMT_8Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
+	if (phba->lmt & LMT_4Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
+	if (phba->lmt & LMT_2Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
+	if (phba->lmt & LMT_1Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+
+	fc_host_maxframe_size(shost) =
+		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
+
+	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
+
+	/* This value is also unchanging */
+	memset(fc_host_active_fc4s(shost), 0,
+	       sizeof(fc_host_active_fc4s(shost)));
+	fc_host_active_fc4s(shost)[2] = 1;
+	fc_host_active_fc4s(shost)[7] = 1;
+
+	fc_host_max_npiv_vports(shost) = phba->max_vpi;
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag &= ~FC_LOADING;
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_stop_port_s3 - Stop SLI3 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI3 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
+{
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	/* Clear all pending interrupts */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+
+	/* Reset some HBA SLI setup states */
+	lpfc_stop_hba_timers(phba);
+	phba->pport->work_port_events = 0;
+}
+
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+	/* Reset some HBA SLI4 setup states */
+	lpfc_stop_hba_timers(phba);
+	phba->pport->work_port_events = 0;
+	phba->sli4_hba.intr_enable = 0;
+}
+
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+	phba->lpfc_stop_port(phba);
+}
+
+/**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+	unsigned long fcf_redisc_wait_tmo =
+		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+	/* Start fcf rediscovery wait period timer */
+	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+	spin_lock_irq(&phba->hbalock);
+	/* Allow handling of new FCF asynchronous events */
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	/* Mark the FCF rediscovery pending state */
+	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: unsigned long holding a pointer to the lpfc hba data structure.
+ *
+ * This routine is invoked when the wait for FCF table rediscovery has
+ * timed out. If new FCF records have been discovered during the wait
+ * period, a new FCF event is added to the FCoE async event list and the
+ * worker thread is woken up to process it from the worker thread context.
+ **/
+static void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+	/* Don't send FCF rediscovery event if timer cancelled */
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	/* Clear FCF rediscovery timer pending flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* FCF rediscovery event to worker thread */
+	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover quiescent timer expired\n");
+	/* wake up worker thread */
+	lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
+ *
+ * Return: Link-attention status in terms of base driver's coding.
+ **/
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+			   struct lpfc_acqe_link *acqe_link)
+{
+	uint16_t latt_fault;
+
+	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+	case LPFC_ASYNC_LINK_FAULT_NONE:
+	case LPFC_ASYNC_LINK_FAULT_LOCAL:
+	case LPFC_ASYNC_LINK_FAULT_REMOTE:
+		latt_fault = 0;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0398 Invalid link fault code: x%x\n",
+				bf_get(lpfc_acqe_link_fault, acqe_link));
+		latt_fault = MBXERR_ERROR;
+		break;
+	}
+	return latt_fault;
+}
+
+/**
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
+ *
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+			  struct lpfc_acqe_link *acqe_link)
+{
+	uint8_t att_type;
+
+	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+	case LPFC_ASYNC_LINK_STATUS_DOWN:
+	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+		att_type = LPFC_ATT_LINK_DOWN;
+		break;
+	case LPFC_ASYNC_LINK_STATUS_UP:
+		/* Ignore physical link up events - wait for logical link up */
+		att_type = LPFC_ATT_RESERVED;
+		break;
+	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+		att_type = LPFC_ATT_LINK_UP;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0399 Invalid link attention type: x%x\n",
+				bf_get(lpfc_acqe_link_status, acqe_link));
+		att_type = LPFC_ATT_RESERVED;
+		break;
+	}
+	return att_type;
+}
+
+/**
+ * lpfc_sli_port_speed_get - Get the current link speed
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine gets an FC port's current link speed in Mbps.
+ *
+ * Return: link speed in terms of Mbps.
+ **/
+uint32_t
+lpfc_sli_port_speed_get(struct lpfc_hba *phba)
+{
+	uint32_t link_speed;
+
+	if (!lpfc_is_link_up(phba))
+		return 0;
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		switch (phba->fc_linkspeed) {
+		case LPFC_LINK_SPEED_1GHZ:
+			link_speed = 1000;
+			break;
+		case LPFC_LINK_SPEED_2GHZ:
+			link_speed = 2000;
+			break;
+		case LPFC_LINK_SPEED_4GHZ:
+			link_speed = 4000;
+			break;
+		case LPFC_LINK_SPEED_8GHZ:
+			link_speed = 8000;
+			break;
+		case LPFC_LINK_SPEED_10GHZ:
+			link_speed = 10000;
+			break;
+		case LPFC_LINK_SPEED_16GHZ:
+			link_speed = 16000;
+			break;
+		default:
+			link_speed = 0;
+		}
+	} else {
+		if (phba->sli4_hba.link_state.logical_speed)
+			link_speed =
+			      phba->sli4_hba.link_state.logical_speed;
+		else
+			link_speed = phba->sli4_hba.link_state.speed;
+	}
+	return link_speed;
+}
+
+/**
+ * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @evt_code: asynchronous event code.
+ * @speed_code: asynchronous event link speed code.
+ *
+ * This routine is to parse the given SLI4 async event link speed code into
+ * value of Mbps for the link speed.
+ *
+ * Return: link speed in terms of Mbps.
+ **/
+static uint32_t
+lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
+			   uint8_t speed_code)
+{
+	uint32_t port_speed;
+
+	switch (evt_code) {
+	case LPFC_TRAILER_CODE_LINK:
+		switch (speed_code) {
+		case LPFC_ASYNC_LINK_SPEED_ZERO:
+			port_speed = 0;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_10MBPS:
+			port_speed = 10;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_100MBPS:
+			port_speed = 100;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_1GBPS:
+			port_speed = 1000;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_10GBPS:
+			port_speed = 10000;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_20GBPS:
+			port_speed = 20000;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_25GBPS:
+			port_speed = 25000;
+			break;
+		case LPFC_ASYNC_LINK_SPEED_40GBPS:
+			port_speed = 40000;
+			break;
+		default:
+			port_speed = 0;
+		}
+		break;
+	case LPFC_TRAILER_CODE_FC:
+		switch (speed_code) {
+		case LPFC_FC_LA_SPEED_UNKNOWN:
+			port_speed = 0;
+			break;
+		case LPFC_FC_LA_SPEED_1G:
+			port_speed = 1000;
+			break;
+		case LPFC_FC_LA_SPEED_2G:
+			port_speed = 2000;
+			break;
+		case LPFC_FC_LA_SPEED_4G:
+			port_speed = 4000;
+			break;
+		case LPFC_FC_LA_SPEED_8G:
+			port_speed = 8000;
+			break;
+		case LPFC_FC_LA_SPEED_10G:
+			port_speed = 10000;
+			break;
+		case LPFC_FC_LA_SPEED_16G:
+			port_speed = 16000;
+			break;
+		case LPFC_FC_LA_SPEED_32G:
+			port_speed = 32000;
+			break;
+		default:
+			port_speed = 0;
+		}
+		break;
+	default:
+		port_speed = 0;
+	}
+	return port_speed;
+}
+
+/**
+ * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FCoE link event.
+ **/
+static void
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_link *acqe_link)
+{
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_mbx_read_top *la;
+	uint8_t att_type;
+	int rc;
+
+	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
+		return;
+	phba->fcoe_eventtag = acqe_link->event_tag;
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0395 The mboxq allocation failed\n");
+		return;
+	}
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0396 The lpfc_dmabuf allocation failed\n");
+		goto out_free_pmb;
+	}
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0397 The mbuf allocation failed\n");
+		goto out_free_dmabuf;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Block ELS IOCBs until we are done processing the link event */
+	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Update link event statistics */
+	phba->sli.slistat.link_event++;
+
+	/* Create lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = phba->pport;
+
+	/* Keep the link status for extra SLI4 state machine reference */
+	phba->sli4_hba.link_state.speed =
+			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
+				bf_get(lpfc_acqe_link_speed, acqe_link));
+	phba->sli4_hba.link_state.duplex =
+				bf_get(lpfc_acqe_link_duplex, acqe_link);
+	phba->sli4_hba.link_state.status =
+				bf_get(lpfc_acqe_link_status, acqe_link);
+	phba->sli4_hba.link_state.type =
+				bf_get(lpfc_acqe_link_type, acqe_link);
+	phba->sli4_hba.link_state.number =
+				bf_get(lpfc_acqe_link_number, acqe_link);
+	phba->sli4_hba.link_state.fault =
+				bf_get(lpfc_acqe_link_fault, acqe_link);
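+	/* The ACQE reports the logical link speed in units of 10 Mbps */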
+	phba->sli4_hba.link_state.logical_speed =
+			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2900 Async FC/FCoE Link event - Speed:%dMbps "
+			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
+			"Logical speed:%dMbps Fault:%d\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.duplex,
+			phba->sli4_hba.link_state.status,
+			phba->sli4_hba.link_state.type,
+			phba->sli4_hba.link_state.number,
+			phba->sli4_hba.link_state.logical_speed,
+			phba->sli4_hba.link_state.fault);
+	/*
+	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
+	 * topology info. Note: Optional for non FC-AL ports.
+	 */
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			goto out_free_dmabuf;
+		return;
+	}
+	/*
+	 * For FCoE Mode: fill in all the topology information we need and call
+	 * the READ_TOPOLOGY completion routine to continue without actually
+	 * sending the READ_TOPOLOGY mailbox command to the port.
+	 */
+	/* Parse and translate status field */
+	mb = &pmb->u.mb;
+	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+	/* Parse and translate link attention fields */
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+	la->eventTag = acqe_link->event_tag;
+	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
+	bf_set(lpfc_mbx_read_top_link_spd, la,
+	       (bf_get(lpfc_acqe_link_speed, acqe_link)));
+
+	/* Fake the following irrelevant fields */
+	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
+	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
+	bf_set(lpfc_mbx_read_top_il, la, 0);
+	bf_set(lpfc_mbx_read_top_pb, la, 0);
+	bf_set(lpfc_mbx_read_top_fa, la, 0);
+	bf_set(lpfc_mbx_read_top_mm, la, 0);
+
+	/* Invoke the lpfc_handle_latt mailbox command callback function */
+	lpfc_mbx_cmpl_read_topology(phba, pmb);
+
+	return;
+
+out_free_dmabuf:
+	kfree(mp);
+out_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fc: pointer to the async fc completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FC event. It will simply log
+ * that the event was received and then issue a read_topology mailbox command so
+ * that the rest of the driver will treat it the same as SLI3.
+ **/
+static void
+lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
+{
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_mbx_read_top *la;
+	int rc;
+
+	if (bf_get(lpfc_trailer_type, acqe_fc) !=
+	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2895 Non FC link Event detected.(%d)\n",
+				bf_get(lpfc_trailer_type, acqe_fc));
+		return;
+	}
+	/* Keep the link status for extra SLI4 state machine reference */
+	phba->sli4_hba.link_state.speed =
+			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
+	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
+	phba->sli4_hba.link_state.topology =
+				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
+	phba->sli4_hba.link_state.status =
+				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
+	phba->sli4_hba.link_state.type =
+				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
+	phba->sli4_hba.link_state.number =
+				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
+	phba->sli4_hba.link_state.fault =
+				bf_get(lpfc_acqe_link_fault, acqe_fc);
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2896 Async FC event - Speed:%dMbps Topology:x%x "
+			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
+			"%dMbps Fault:%d\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.topology,
+			phba->sli4_hba.link_state.status,
+			phba->sli4_hba.link_state.type,
+			phba->sli4_hba.link_state.number,
+			phba->sli4_hba.link_state.logical_speed,
+			phba->sli4_hba.link_state.fault);
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2897 The mboxq allocation failed\n");
+		return;
+	}
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2898 The lpfc_dmabuf allocation failed\n");
+		goto out_free_pmb;
+	}
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2899 The mbuf allocation failed\n");
+		goto out_free_dmabuf;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Block ELS IOCBs until we are done processing the link event */
+	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Update link event statistics */
+	phba->sli.slistat.link_event++;
+
+	/* Create lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = phba->pport;
+
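+	/*
+	 * For link-down and MDS diagnostic attention types, synthesize the
+	 * READ_TOPOLOGY completion locally instead of issuing the mailbox
+	 * command to the port.
+	 */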
+	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
+		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
+
+		switch (phba->sli4_hba.link_state.status) {
+		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
+			phba->link_flag |= LS_MDS_LINK_DOWN;
+			break;
+		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
+			phba->link_flag |= LS_MDS_LOOPBACK;
+			break;
+		default:
+			break;
+		}
+
+		/* Parse and translate status field */
+		mb = &pmb->u.mb;
+		mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
+							   (void *)acqe_fc);
+
+		/* Parse and translate link attention fields */
+		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
+		la->eventTag = acqe_fc->event_tag;
+
+		if (phba->sli4_hba.link_state.status ==
+		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
+			bf_set(lpfc_mbx_read_top_att_type, la,
+			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
+		} else {
+			bf_set(lpfc_mbx_read_top_att_type, la,
+			       LPFC_FC_LA_TYPE_LINK_DOWN);
+		}
+		/* Invoke the mailbox command callback function */
+		lpfc_mbx_cmpl_read_topology(phba, pmb);
+
+		return;
+	}
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		goto out_free_dmabuf;
+	return;
+
+out_free_dmabuf:
+	kfree(mp);
+out_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_sli: pointer to the async SLI completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous SLI events.
+ **/
+static void
+lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
+{
+	char port_name;
+	char message[128];
+	uint8_t status;
+	uint8_t evt_type;
+	uint8_t operational = 0;
+	struct temp_event temp_event_data;
+	struct lpfc_acqe_misconfigured_event *misconfigured;
+	struct Scsi_Host  *shost;
+
+	evt_type = bf_get(lpfc_trailer_type, acqe_sli);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
+			"x%08x SLI Event Type:%d\n",
+			acqe_sli->event_data1, acqe_sli->event_data2,
+			evt_type);
+
+	port_name = phba->Port[0];
+	if (port_name == 0x00)
+		port_name = '?'; /* port name is empty */
+
+	switch (evt_type) {
+	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+		temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3190 Over Temperature:%d Celsius - Port Name %c\n",
+				acqe_sli->event_data1, port_name);
+
+		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+					  sizeof(temp_event_data),
+					  (char *)&temp_event_data,
+					  SCSI_NL_VID_TYPE_PCI
+					  | PCI_VENDOR_ID_EMULEX);
+		break;
+	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		temp_event_data.event_code = LPFC_NORMAL_TEMP;
+		temp_event_data.data = (uint32_t)acqe_sli->event_data1;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
+				acqe_sli->event_data1, port_name);
+
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+					  sizeof(temp_event_data),
+					  (char *)&temp_event_data,
+					  SCSI_NL_VID_TYPE_PCI
+					  | PCI_VENDOR_ID_EMULEX);
+		break;
+	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
+		misconfigured = (struct lpfc_acqe_misconfigured_event *)
+					&acqe_sli->event_data1;
+
+		/* fetch the status for this port */
+		switch (phba->sli4_hba.lnk_info.lnk_no) {
+		case LPFC_LINK_NUMBER_0:
+			status = bf_get(lpfc_sli_misconfigured_port0_state,
+					&misconfigured->theEvent);
+			operational = bf_get(lpfc_sli_misconfigured_port0_op,
+					&misconfigured->theEvent);
+			break;
+		case LPFC_LINK_NUMBER_1:
+			status = bf_get(lpfc_sli_misconfigured_port1_state,
+					&misconfigured->theEvent);
+			operational = bf_get(lpfc_sli_misconfigured_port1_op,
+					&misconfigured->theEvent);
+			break;
+		case LPFC_LINK_NUMBER_2:
+			status = bf_get(lpfc_sli_misconfigured_port2_state,
+					&misconfigured->theEvent);
+			operational = bf_get(lpfc_sli_misconfigured_port2_op,
+					&misconfigured->theEvent);
+			break;
+		case LPFC_LINK_NUMBER_3:
+			status = bf_get(lpfc_sli_misconfigured_port3_state,
+					&misconfigured->theEvent);
+			operational = bf_get(lpfc_sli_misconfigured_port3_op,
+					&misconfigured->theEvent);
+			break;
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"3296 "
+					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
+					"event: Invalid link %d",
+					phba->sli4_hba.lnk_info.lnk_no);
+			return;
+		}
+
+		/* Skip if optic state unchanged */
+		if (phba->sli4_hba.lnk_info.optic_state == status)
+			return;
+
+		switch (status) {
+		case LPFC_SLI_EVENT_STATUS_VALID:
+			sprintf(message, "Physical Link is functional");
+			break;
+		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
+			sprintf(message, "Optics faulted/incorrectly "
+				"installed/not installed - Reseat optics, "
+				"if issue not resolved, replace.");
+			break;
+		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
+			sprintf(message,
+				"Optics of two types installed - Remove one "
+				"optic or install matching pair of optics.");
+			break;
+		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
+			sprintf(message, "Incompatible optics - Replace with "
+				"compatible optics for card to function.");
+			break;
+		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
+			sprintf(message, "Unqualified optics - Replace with "
+				"Avago optics for Warranty and Technical "
+				"Support - Link is%s operational",
+				(operational) ? " not" : "");
+			break;
+		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
+			sprintf(message, "Uncertified optics - Replace with "
+				"Avago-certified optics to enable link "
+				"operation - Link is%s operational",
+				(operational) ? " not" : "");
+			break;
+		default:
+			/* firmware is reporting a status we don't know about */
+			sprintf(message, "Unknown event status x%02x", status);
+			break;
+		}
+		phba->sli4_hba.lnk_info.optic_state = status;
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3176 Port Name %c %s\n", port_name, message);
+		break;
+	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3192 Remote DPort Test Initiated - "
+				"Event Data1:x%08x Event Data2: x%08x\n",
+				acqe_sli->event_data1, acqe_sli->event_data2);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
+				"x%08x SLI Event Type:%d\n",
+				acqe_sli->event_data1, acqe_sli->event_data2,
+				evt_type);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp with the vport if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+
+	if (!vport)
+		return NULL;
+	phba = vport->phba;
+	if (!phba)
+		return NULL;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = lpfc_nlp_init(vport, Fabric_DID);
+		if (!ndlp)
+			return NULL;
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return NULL;
+	}
+	if ((phba->pport->port_state < LPFC_FLOGI) &&
+		(phba->pport->port_state != LPFC_VPORT_FAILED))
+		return NULL;
+	/* If virtual link is not yet instantiated ignore CVL */
+	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
+		&& (vport->port_state != LPFC_VPORT_FAILED))
+		return NULL;
+	shost = lpfc_shost_from_vport(vport);
+	if (!shost)
+		return NULL;
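+	/* Take the link down on this vport and record that a CVL was seen */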
+	lpfc_linkdown_port(vport);
+	lpfc_cleanup_pending_mbox(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_CVL_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_sli4_perform_vport_cvl(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FCoE FIP event.
+ **/
+static void
+lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
+			struct lpfc_acqe_fip *acqe_fip)
+{
+	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
+	int rc;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+	int active_vlink_present;
+	struct lpfc_vport **vports;
+	int i;
+
+	phba->fc_eventTag = acqe_fip->event_tag;
+	phba->fcoe_eventtag = acqe_fip->event_tag;
+	switch (event_type) {
+	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
+	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
+		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2546 New FCF event, evt_tag:x%x, "
+					"index:x%x\n",
+					acqe_fip->event_tag,
+					acqe_fip->index);
+		else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+					LOG_DISCOVERY,
+					"2788 FCF param modified event, "
+					"evt_tag:x%x, index:x%x\n",
+					acqe_fip->event_tag,
+					acqe_fip->index);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF roundrobin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read FCF (x%x) for updating "
+					"roundrobin FCF failover bmask\n",
+					acqe_fip->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
+		}
+
+		/* If the FCF discovery is in progress, do nothing. */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		/* If fast FCF failover rescan event is pending, do nothing */
+		if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+
+		/* If the FCF has been in discovered state, do nothing. */
+		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan per async FCF "
+				"event, evt_tag:x%x, index:x%x\n",
+				acqe_fip->event_tag, acqe_fip->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
+		if (rc)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+					"2547 Issue FCF scan read FCF mailbox "
+					"command failed (x%x)\n", rc);
+		break;
+
+	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2548 FCF Table full count 0x%x tag 0x%x\n",
+			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
+			acqe_fip->event_tag);
+		break;
+
+	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2549 FCF (x%x) disconnected from network, "
+			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
+		/*
+		 * If we are in the middle of FCF failover process, clear
+		 * the corresponding FCF bit in the roundrobin bitmap.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
+			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
+			break;
+		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* If the event is not for currently used fcf do nothing */
+		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
+			break;
+
+		/*
+		 * Otherwise, request the port to rediscover the entire FCF
+		 * table for a fast recovery from case that the current FCF
+		 * is no longer valid as we are not in the middle of FCF
+		 * failover process already.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		/* Mark the fast failover process in progress */
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2771 Start FCF fast failover process due to "
+				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+				"\n", acqe_fip->event_tag, acqe_fip->index);
+		rc = lpfc_sli4_redisc_fcf_table(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mailbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Last resort will fail over by treating this
+			 * as a link down to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		} else {
+			/* Reset FCF roundrobin bmask for new discovery */
+			lpfc_sli4_clear_fcf_rr_bmask(phba);
+			/*
+			 * Handling fast FCF failover to a DEAD FCF event is
+			 * considered equivalent to receiving CVL to all vports.
+			 */
+			lpfc_sli4_perform_all_vport_cvl(phba);
+		}
+		break;
+	case LPFC_FIP_EVENT_TYPE_CVL:
+		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2718 Clear Virtual Link Received for VPI 0x%x"
+			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
+		vport = lpfc_find_vport_by_vpid(phba,
+						acqe_fip->index);
+		ndlp = lpfc_sli4_perform_vport_cvl(vport);
+		if (!ndlp)
+			break;
+		active_vlink_present = 0;
+
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				if ((!(vports[i]->fc_flag &
+					FC_VPORT_CVL_RCVD)) &&
+					(vports[i]->port_state > LPFC_FDISC)) {
+					active_vlink_present = 1;
+					break;
+				}
+			}
+			lpfc_destroy_vport_work_array(phba, vports);
+		}
+
+		/*
+		 * Don't re-instantiate if vport is marked for deletion.
+		 * If we are here first then vport_delete is going to wait
+		 * for discovery to complete.
+		 */
+		if (!(vport->load_flag & FC_UNLOADING) &&
+					active_vlink_present) {
+			/*
+			 * If there are other active VLinks present,
+			 * re-instantiate the Vlink using FDISC.
+			 */
+			mod_timer(&ndlp->nlp_delayfunc,
+				  jiffies + msecs_to_jiffies(1000));
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+			vport->port_state = LPFC_FDISC;
+		} else {
+			/*
+			 * Otherwise, we request port to rediscover
+			 * the entire FCF table for a fast recovery
+			 * from possible case that the current FCF
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
+			 */
+			spin_lock_irq(&phba->hbalock);
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				spin_unlock_irq(&phba->hbalock);
+				break;
+			}
+			/* Mark the fast failover process in progress */
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF failover per CVL, "
+					"evt_tag:x%x\n", acqe_fip->event_tag);
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mailbox command failed, fail "
+						"through to CVL event\n");
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+				spin_unlock_irq(&phba->hbalock);
+				/*
+				 * Last resort will be a retry on the
+				 * currently registered FCF entry.
+				 */
+				lpfc_retry_pport_discovery(phba);
+			} else
+				/*
+				 * Reset FCF roundrobin bmask for new
+				 * discovery.
+				 */
+				lpfc_sli4_clear_fcf_rr_bmask(phba);
+		}
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0288 Unknown FCoE event type 0x%x event tag "
+			"0x%x\n", event_type, acqe_fip->event_tag);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_dcbx: pointer to the async DCBX completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
+static void
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_dcbx *acqe_dcbx)
+{
+	phba->fc_eventTag = acqe_dcbx->event_tag;
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0290 The SLI4 DCBX asynchronous event is not "
+			"handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notification of a logical link speed change.  The Port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_grp5 *acqe_grp5)
+{
+	uint16_t prev_ll_spd;
+
+	phba->fc_eventTag = acqe_grp5->event_tag;
+	phba->fcoe_eventtag = acqe_grp5->event_tag;
+	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+	phba->sli4_hba.link_state.logical_speed =
+		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2789 GRP5 Async Event: Updating logical link speed "
+			"from %dMbps to %dMbps\n", prev_ll_spd,
+			phba->sli4_hba.link_state.logical_speed);
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the async event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ASYNC_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the async events */
+	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Process the asynchronous event */
+		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+		case LPFC_TRAILER_CODE_LINK:
+			lpfc_sli4_async_link_evt(phba,
+						 &cq_event->cqe.acqe_link);
+			break;
+		case LPFC_TRAILER_CODE_FCOE:
+			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
+			break;
+		case LPFC_TRAILER_CODE_DCBX:
+			lpfc_sli4_async_dcbx_evt(phba,
+						 &cq_event->cqe.acqe_dcbx);
+			break;
+		case LPFC_TRAILER_CODE_GRP5:
+			lpfc_sli4_async_grp5_evt(phba,
+						 &cq_event->cqe.acqe_grp5);
+			break;
+		case LPFC_TRAILER_CODE_FC:
+			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
+			break;
+		case LPFC_TRAILER_CODE_SLI:
+			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
+			break;
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"1804 Invalid asynchronous event code: "
+					"x%x\n", bf_get(lpfc_trailer_code,
+					&cq_event->cqe.mcqe_cmpl));
+			break;
+		}
+		/* Free the completion event processed to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Clear FCF rediscovery timeout event */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+	/* Clear driver fast failover FCF record flag */
+	phba->fcf.failover_rec.flag = 0;
+	/* Set state for FCF fast failover */
+	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Scan FCF table from the first entry to re-discover SAN */
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start post-quiescent FCF table scan\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2747 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
+}
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 if success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	int rc;
+
+	/* Set up lpfc PCI-device group */
+	phba->pci_dev_grp = dev_grp;
+
+	/* The LPFC_PCI_DEV_OC uses SLI4 */
+	if (dev_grp == LPFC_PCI_DEV_OC)
+		phba->sli_rev = LPFC_SLI_REV4;
+
+	/* Set up device INIT API function jump table */
+	rc = lpfc_init_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up SCSI API function jump table */
+	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up SLI API function jump table */
+	rc = lpfc_sli_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up MBOX API function jump table */
+	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+
+	return 0;
+}
+
+/**
+ * lpfc_log_intr_mode - Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine is invoked to log the currently active interrupt mode of
+ * the device.
+ **/
+static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+	switch (intr_mode) {
+	case 0:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0470 Enabled INTx interrupt mode.\n");
+		break;
+	case 1:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0481 Enabled MSI interrupt mode.\n");
+		break;
+	case 2:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0480 Enabled MSI-X interrupt mode.\n");
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0482 Illegal interrupt mode.\n");
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_enable_pci_dev - Enable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the PCI device that is common to all
+ * PCI devices.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_enable_pci_dev(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		goto out_error;
+	else
+		pdev = phba->pcidev;
+	/* Enable PCI device */
+	if (pci_enable_device_mem(pdev))
+		goto out_error;
+	/* Request PCI resource for the device */
+	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
+		goto out_disable_device;
+	/* Set up device as PCI master and save state for EEH */
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+	pci_save_state(pdev);
+
+	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+	if (pci_is_pcie(pdev))
+		pdev->needs_freset = 1;
+
+	return 0;
+
+out_disable_device:
+	pci_disable_device(pdev);
+out_error:
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"1401 Failed to enable pci device\n");
+	return -ENODEV;
+}
+
+/**
+ * lpfc_disable_pci_dev - Disable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the PCI device that is common to all
+ * PCI devices.
+ **/
+static void
+lpfc_disable_pci_dev(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+	/* Release PCI resource and disable PCI device */
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+
+	return;
+}
+
+/**
+ * lpfc_reset_hba - Reset a hba
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to reset a hba device. It brings the HBA
+ * offline, performs a board restart, and then brings the board back
+ * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
+ * outstanding mailbox commands.
+ **/
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
+{
+	/* If resets are disabled then set error state and return. */
+	if (!phba->cfg_enable_hba_reset) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return;
+	}
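+	/* Wait for mailbox completions only while the SLI layer is active */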
+	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	else
+		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+	lpfc_offline(phba);
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+	lpfc_unblock_mgmt_io(phba);
+}
+
+/**
+ * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads the PCI SR-IOV extended capability of the physical
+ * function to determine the total number of virtual functions the device
+ * supports.
+ *
+ * Return: the number of virtual functions supported, or 0 if the device
+ * has no SR-IOV capability.
+ **/
+uint16_t
+lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	uint16_t nr_virtfn;
+	int pos;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (pos == 0)
+		return 0;
+
+	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
+	return nr_virtfn;
+}
+
+/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables the PCI SR-IOV virtual functions on a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable that number of virtual functions on the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call is not considered an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	uint16_t max_nr_vfn;
+	int rc;
+
+	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+	if (nr_vfn > max_nr_vfn) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3057 Requested vfs (%d) greater than "
+				"supported vfs (%d)", nr_vfn, max_nr_vfn);
+		return -EINVAL;
+	}
+
+	rc = pci_enable_sriov(pdev, nr_vfn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2806 Failed to enable sriov on this device "
+				"with vfn number nr_vf:%d, rc:%d\n",
+				nr_vfn, rc);
+	} else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2807 Successful enable sriov on this device "
+				"with vfn number nr_vf:%d\n", nr_vfn);
+	return rc;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	/*
+	 * Driver resources common to all SLI revisions
+	 */
+	atomic_set(&phba->fast_event_count, 0);
+	spin_lock_init(&phba->hbalock);
+
+	/* Initialize ndlp management spinlock */
+	spin_lock_init(&phba->ndlp_lock);
+
+	INIT_LIST_HEAD(&phba->port_list);
+	INIT_LIST_HEAD(&phba->work_list);
+	init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1403 Protocols supported %s %s %s\n",
+			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
+				"SCSI" : " "),
+			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
+				"NVME" : " "),
+			(phba->nvmet_support ? "NVMET" : " "));
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		/* Initialize the scsi buffer list used by driver for scsi IO */
+		spin_lock_init(&phba->scsi_buf_list_get_lock);
+		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+		spin_lock_init(&phba->scsi_buf_list_put_lock);
+		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+	}
+
+	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+		(phba->nvmet_support == 0)) {
+		/* Initialize the NVME buffer list used by driver for NVME IO */
+		spin_lock_init(&phba->nvme_buf_list_get_lock);
+		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+		spin_lock_init(&phba->nvme_buf_list_put_lock);
+		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+	}
+
+	/* Initialize the fabric iocb list */
+	INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+	/* Initialize list to save ELS buffers */
+	INIT_LIST_HEAD(&phba->elsbuf);
+
+	/* Initialize FCF connection rec list */
+	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+	/* Initialize OAS configuration list */
+	spin_lock_init(&phba->devicelock);
+	INIT_LIST_HEAD(&phba->luns);
+
+	/* MBOX heartbeat timer */
+	setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
+	/* Fabric block timer */
+	setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
+			(unsigned long)phba);
+	/* EA polling mode timer */
+	setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
+			(unsigned long)phba);
+	/* Heartbeat timer */
+	setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-3 HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/*
+	 * Initialize timers used by driver
+	 */
+
+	/* FCP polling mode timer */
+	setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
+			(unsigned long)phba);
+
+	/* Host attention work mask setup */
+	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
+	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
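+	/*
+	 * Each ring owns a 4-bit field in the Host Attention register, so
+	 * this enables RX attention for the ELS ring in addition to the
+	 * error, mailbox and link attention bits.
+	 */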
+
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+
+	/* Set up phase-1 common device driver resources */
+	rc = lpfc_setup_driver_resource_phase1(phba);
+	if (rc)
+		return -ENODEV;
+
+	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+		phba->menlo_flag |= HBA_MENLO_SUPPORT;
+		/* check for menlo minimum sg count */
+		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+	}
+
+	if (!phba->sli.sli3_ring)
+		phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
+			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+	if (!phba->sli.sli3_ring)
+		return -ENOMEM;
+
+	/*
+	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 */
+
+	/* Initialize the host templates with the configured values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
+	if (phba->cfg_enable_bg) {
+		/*
+		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+		 * the FCP rsp, and a BDE for each. Since we have no control
+		 * over how many protection data segments the SCSI Layer
+		 * will hand us (i.e., there could be one for every block
+		 * in the IO), we just allocate enough BDEs to accommodate
+		 * our max amount and we need to limit lpfc_sg_seg_cnt to
+		 * minimize the risk of running out.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) +
+			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+	} else {
+		/*
+		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
+		 * the FCP rsp, a BDE for each, and a BDE for up to
+		 * cfg_sg_seg_cnt data segments.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) +
+			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
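+		/*
+		 * Illustrative sizing: with a cfg_sg_seg_cnt of 64, the
+		 * pool buffer holds the FCP cmnd, the FCP rsp and 66 BDEs
+		 * (64 data segments plus the two reserved entries noted
+		 * above).
+		 */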
+
+		/* Total BDEs in BPL for scsi_sg_list */
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+			phba->cfg_total_seg_cnt);
+
+	phba->max_vpi = LPFC_MAX_VPI;
+	/* This will be set to correct value after config_port mbox */
+	phba->max_vports = 0;
+
+	/*
+	 * Initialize the SLI Layer to run with lpfc HBAs.
+	 */
+	lpfc_sli_setup(phba);
+	lpfc_sli_queue_init(phba);
+
+	/* Allocate device driver memory */
+	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
+		return -ENOMEM;
+
+	/*
+	 * Enable sr-iov virtual functions if supported and configured
+	 * through the module parameter.
+	 */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+						 phba->cfg_sriov_nr_virtfn);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"2808 Requested number of SR-IOV "
+					"virtual functions (%d) is not "
+					"supported\n",
+					phba->cfg_sriov_nr_virtfn);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-3 HBA device it attached to.
+ **/
+static void
+lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
+{
+	/* Free device driver memory allocated */
+	lpfc_mem_free_all(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-4 HBA device it attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	MAILBOX_t *mb;
+	int rc, i, max_buf_size;
+	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+	struct lpfc_mqe *mqe;
+	int longs;
+	int fof_vectors = 0;
+	uint64_t wwn;
+
+	phba->sli4_hba.num_online_cpu = num_online_cpus();
+	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+	phba->sli4_hba.curr_disp_cpu = 0;
+
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+
+	/* Set up phase-1 common device driver resources */
+	rc = lpfc_setup_driver_resource_phase1(phba);
+	if (rc)
+		return -ENODEV;
+
+	/* Before proceed, wait for POST done and device ready */
+	rc = lpfc_sli4_post_status_check(phba);
+	if (rc)
+		return -ENODEV;
+
+	/*
+	 * Initialize timers used by driver
+	 */
+
+	setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
+
+	/* FCF rediscover timer */
+	setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
+			(unsigned long)phba);
+
+	/*
+	 * Control structure for handling external multi-buffer mailbox
+	 * command pass-through.
+	 */
+	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+		sizeof(struct lpfc_mbox_ext_buf_ctx));
+	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	phba->max_vpi = LPFC_MAX_VPI;
+
+	/* This will be set to correct value after the read_config mbox */
+	phba->max_vports = 0;
+
+	/* Program the default value of vlan_id and fc_map */
+	phba->valid_vlan = 0;
+	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+	/*
+	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands,
+	 * we will associate a new ring with each EQ/CQ/WQ tuple.
+	 * The WQ create will allocate the ring.
+	 */
+
+	/*
+	 * Regardless of which family our adapter is in, we are limited
+	 * to 2 pages (512 SGEs) for our SGL.
+	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp.
+	 */
+	max_buf_size = (2 * SLI4_PAGE_SIZE);
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
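+	/*
+	 * Two 4KB SLI4 pages of 16-byte SGEs yield the 512-entry limit
+	 * noted above (2 * 4096 / 16 = 512).
+	 */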
+
+	/*
+	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be calculated.
+	 */
+	if (phba->cfg_enable_bg) {
+		/*
+		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+		 * the FCP rsp, and a SGE. Since we have no control
+		 * over how many protection segments the SCSI Layer
+		 * will hand us (i.e., there could be one for every block
+		 * in the IO), just allocate enough SGEs to accommodate
+		 * our max amount and we need to limit lpfc_sg_seg_cnt
+		 * to minimize the risk of running out.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+				sizeof(struct fcp_rsp) + max_buf_size;
+
+		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+			phba->cfg_sg_seg_cnt =
+				LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+	} else {
+		/*
+		 * The scsi_buf for a regular I/O holds the FCP cmnd,
+		 * the FCP rsp, a SGE for each, and a SGE for up to
+		 * cfg_sg_seg_cnt data segments.
+		 */
+		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+				sizeof(struct fcp_rsp) +
+				((phba->cfg_sg_seg_cnt + 2) *
+				sizeof(struct sli4_sge));
+
+		/* Total SGEs for scsi_sg_list */
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+
+		/*
+		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+		 * need to post 1 page for the SGL.
+		 */
+	}
+
+	/* Initialize the host templates with the updated values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+	else
+		phba->cfg_sg_dma_buf_size =
+			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
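+	/*
+	 * Pool buffers are therefore at least LPFC_MIN_SG_SLI4_BUF_SZ and
+	 * otherwise rounded up to a whole number of SLI4 pages.
+	 */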
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+			phba->cfg_total_seg_cnt);
+
+	/* Initialize buffer queue management fields */
+	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
+	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+
+	/*
+	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+	 */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		/* Initialize the Abort scsi buffer list used by driver */
+		spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+	}
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		/* Initialize the Abort nvme buffer list used by driver */
+		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+
+		/* Fast-path XRI aborted CQ Event work queue list */
+		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+	}
+
+	/* This abort list used by worker thread */
+	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
+
+	/*
+	 * Initialize driver internal slow-path work queues
+	 */
+
+	/* Driver internal slow-path CQ Event pool */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+	/* Response IOCB work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
+	/* Asynchronous event CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+	/* Fast-path XRI aborted CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+	/* Slow-path XRI aborted CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+	/* Receive queue CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+	/* Initialize extent block lists. */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
+	/* Initialize mboxq lists. If the early init routines fail
+	 * these lists need to be correctly initialized.
+	 */
+	INIT_LIST_HEAD(&phba->sli.mboxq);
+	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
+
+	/* initialize optic_state to 0xFF */
+	phba->sli4_hba.lnk_info.optic_state = 0xff;
+
+	/* Allocate device driver memory */
+	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+	if (rc)
+		return -ENOMEM;
+
+	/* IF Type 2 ports get initialized now. */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		rc = lpfc_pci_function_reset(phba);
+		if (unlikely(rc)) {
+			rc = -ENODEV;
+			goto out_free_mem;
+		}
+		phba->temp_sensor_support = 1;
+	}
+
+	/* Create the bootstrap mailbox command */
+	rc = lpfc_create_bootstrap_mbox(phba);
+	if (unlikely(rc))
+		goto out_free_mem;
+
+	/* Set up the host's endian order with the device. */
+	rc = lpfc_setup_endian_order(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* Set up the hba's configuration parameters. */
+	rc = lpfc_sli4_read_config(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* IF Type 0 ports get initialized now. */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_0) {
+		rc = lpfc_pci_function_reset(phba);
+		if (unlikely(rc))
+			goto out_free_bsmbx;
+	}
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto out_free_bsmbx;
+	}
+
+	/* Check for NVMET being configured */
+	phba->nvmet_support = 0;
+	if (lpfc_enable_nvmet_cnt) {
+
+		/* First get WWN of HBA instance */
+		lpfc_read_nv(phba, mboxq);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6016 Mailbox failed, mbxCmd x%x "
+					"READ_NV, mbxStatus x%x\n",
+					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			rc = -EIO;
+			goto out_free_bsmbx;
+		}
+		mb = &mboxq->u.mb;
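+		/*
+		 * The node and port names arrive big-endian in the mailbox
+		 * payload; byte-swap them so the comparison against the
+		 * lpfc_enable_nvmet module parameter below uses a
+		 * consistent representation.
+		 */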
+		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
+		       sizeof(uint64_t));
+		wwn = cpu_to_be64(wwn);
+		phba->sli4_hba.wwnn.u.name = wwn;
+		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
+		       sizeof(uint64_t));
+		/* wwn is WWPN of HBA instance */
+		wwn = cpu_to_be64(wwn);
+		phba->sli4_hba.wwpn.u.name = wwn;
+
+		/* Check to see if it matches any module parameter */
+		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
+			if (wwn == lpfc_enable_nvmet[i]) {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+				if (lpfc_nvmet_mem_alloc(phba))
+					break;
+
+				phba->nvmet_support = 1; /* a match */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6017 NVME Target %016llx\n",
+						wwn);
+#else
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6021 Can't enable NVME Target."
+						" NVME_TARGET_FC infrastructure"
+						" is not in kernel\n");
+#endif
+				break;
+			}
+		}
+	}
+
+	lpfc_nvme_mod_param_dep(phba);
+
+	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
+	lpfc_supported_pages(mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (!rc) {
+		mqe = &mboxq->u.mqe;
+		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+		       LPFC_MAX_SUPPORTED_PAGES);
+		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+			switch (pn_page[i]) {
+			case LPFC_SLI4_PARAMETERS:
+				phba->sli4_hba.pc_sli4_params.supported = 1;
+				break;
+			default:
+				break;
+			}
+		}
+		/* Read the port's SLI4 Parameters capabilities if supported. */
+		if (phba->sli4_hba.pc_sli4_params.supported)
+			rc = lpfc_pc_sli4_params_get(phba, mboxq);
+		if (rc) {
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			rc = -EIO;
+			goto out_free_bsmbx;
+		}
+	}
+
+	/*
+	 * Get sli4 parameters that override parameters from Port capabilities.
+	 * If this call fails, it isn't critical unless the SLI4 parameters come
+	 * back in conflict.
+	 */
+	rc = lpfc_get_sli4_parameters(phba, mboxq);
+	if (rc) {
+		if (phba->sli4_hba.extents_in_use &&
+		    phba->sli4_hba.rpi_hdrs_in_use) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2999 Unsupported SLI4 Parameters "
+				"Extents and RPI headers enabled.\n");
+		}
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		rc = -EIO;
+		goto out_free_bsmbx;
+	}
+
+	mempool_free(mboxq, phba->mbox_mem_pool);
+
+	/* Verify OAS is supported */
+	lpfc_sli4_oas_verify(phba);
+	if (phba->cfg_fof)
+		fof_vectors = 1;
+
+	/* Verify all the SLI4 queues */
+	rc = lpfc_sli4_queue_verify(phba);
+	if (rc)
+		goto out_free_bsmbx;
+
+	/* Create driver internal CQE event pool */
+	rc = lpfc_sli4_cq_event_pool_create(phba);
+	if (rc)
+		goto out_free_bsmbx;
+
+	/* Initialize sgl lists per host */
+	lpfc_init_sgl_list(phba);
+
+	/* Allocate and initialize active sgl array */
+	rc = lpfc_init_active_sgl_array(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1430 Failed to initialize sgl list.\n");
+		goto out_destroy_cq_event_pool;
+	}
+	rc = lpfc_sli4_init_rpi_hdrs(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1432 Failed to initialize rpi headers.\n");
+		goto out_free_active_sgl;
+	}
+
+	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
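+	/* Open-coded DIV_ROUND_UP: one bit per FCF table index */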
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		rc = -ENOMEM;
+		goto out_remove_rpi_hdrs;
+	}
+
+	phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
+						sizeof(struct lpfc_hba_eq_hdl),
+						GFP_KERNEL);
+	if (!phba->sli4_hba.hba_eq_hdl) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2572 Failed allocate memory for "
+				"fast-path per-EQ handle array\n");
+		rc = -ENOMEM;
+		goto out_free_fcf_rr_bmask;
+	}
+
+	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+					sizeof(struct lpfc_vector_map_info),
+					GFP_KERNEL);
+	if (!phba->sli4_hba.cpu_map) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3327 Failed allocate memory for msi-x "
+				"interrupt vector mapping\n");
+		rc = -ENOMEM;
+		goto out_free_hba_eq_hdl;
+	}
+	if (lpfc_used_cpu == NULL) {
+		lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
+						GFP_KERNEL);
+		if (!lpfc_used_cpu) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3335 Failed allocate memory for msi-x "
+					"interrupt vector mapping\n");
+			kfree(phba->sli4_hba.cpu_map);
+			rc = -ENOMEM;
+			goto out_free_hba_eq_hdl;
+		}
+		for (i = 0; i < lpfc_present_cpu; i++)
+			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
+	}
+
+	/*
+	 * Enable sr-iov virtual functions if supported and configured
+	 * through the module parameter.
+	 */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+						 phba->cfg_sriov_nr_virtfn);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"3020 Requested number of SR-IOV "
+					"virtual functions (%d) is not "
+					"supported\n",
+					phba->cfg_sriov_nr_virtfn);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+	}
+
+	return 0;
+
+out_free_hba_eq_hdl:
+	kfree(phba->sli4_hba.hba_eq_hdl);
+out_free_fcf_rr_bmask:
+	kfree(phba->fcf.fcf_rr_bmask);
+out_remove_rpi_hdrs:
+	lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+	lpfc_free_active_sgl(phba);
+out_destroy_cq_event_pool:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_bsmbx:
+	lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+	lpfc_mem_free(phba);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-4 HBA device it attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+
+	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
+	kfree(phba->sli4_hba.cpu_map);
+	phba->sli4_hba.num_present_cpu = 0;
+	phba->sli4_hba.num_online_cpu = 0;
+	phba->sli4_hba.curr_disp_cpu = 0;
+
+	/* Free memory allocated for fast-path work queue handles */
+	kfree(phba->sli4_hba.hba_eq_hdl);
+
+	/* Free the allocated rpi headers. */
+	lpfc_sli4_remove_rpi_hdrs(phba);
+	lpfc_sli4_remove_rpis(phba);
+
+	/* Free eligible FCF index bmask */
+	kfree(phba->fcf.fcf_rr_bmask);
+
+	/* Free the ELS sgl list */
+	lpfc_free_active_sgl(phba);
+	lpfc_free_els_sgl_list(phba);
+	lpfc_free_nvmet_sgl_list(phba);
+
+	/* Free the completion queue EQ event pool */
+	lpfc_sli4_cq_event_release_all(phba);
+	lpfc_sli4_cq_event_pool_destroy(phba);
+
+	/* Release resource identifiers. */
+	lpfc_sli4_dealloc_resource_identifiers(phba);
+
+	/* Free the bsmbx region. */
+	lpfc_destroy_bootstrap_mbox(phba);
+
+	/* Free the SLI Layer memory with SLI4 HBAs */
+	lpfc_mem_free_all(phba);
+
+	/* Free the current connect table */
+	list_for_each_entry_safe(conn_entry, next_conn_entry,
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
+		kfree(conn_entry);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	phba->lpfc_hba_init_link = lpfc_hba_init_link;
+	phba->lpfc_hba_down_link = lpfc_hba_down_link;
+	phba->lpfc_selective_reset = lpfc_selective_reset;
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+		phba->lpfc_stop_port = lpfc_stop_port_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+		phba->lpfc_stop_port = lpfc_stop_port_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1431 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	int error;
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					  "lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		return error;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up after
+ * the device specific resource setup for supporting the HBA device it
+ * attached to.
+ **/
+static void
+lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	/* Stop kernel worker thread */
+	kthread_stop(phba->worker_thread);
+}
+
+/**
+ * lpfc_free_iocb_list - Free iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's IOCB list and memory.
+ **/
+void
+lpfc_free_iocb_list(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocbq_entry, iocbq_next,
+				 &phba->lpfc_iocb_list, list) {
+		list_del(&iocbq_entry->list);
+		kfree(iocbq_entry);
+		phba->total_iocbq_bufs--;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+/**
+ * lpfc_init_iocb_list - Allocate and initialize iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's IOCB
+ * list and set up the IOCB tag array accordingly.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+int
+lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
+{
+	struct lpfc_iocbq *iocbq_entry = NULL;
+	uint16_t iotag;
+	int i;
+
+	/* Initialize and populate the iocb list per host.  */
+	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+	for (i = 0; i < iocb_count; i++) {
+		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+		if (iocbq_entry == NULL) {
+			printk(KERN_ERR "%s: only allocated %d iocbs of "
+				"expected %d count. Unloading driver.\n",
+				__func__, i, iocb_count);
+			goto out_free_iocbq;
+		}
+
+		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
+		if (iotag == 0) {
+			kfree(iocbq_entry);
+			printk(KERN_ERR "%s: failed to allocate IOTAG. "
+				"Unloading driver.\n", __func__);
+			goto out_free_iocbq;
+		}
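+		/* Actual XRIs are assigned later, at I/O issue time on SLI4 */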
+		iocbq_entry->sli4_lxritag = NO_XRI;
+		iocbq_entry->sli4_xritag = NO_XRI;
+
+		spin_lock_irq(&phba->hbalock);
+		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+		phba->total_iocbq_bufs++;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	return 0;
+
+out_free_iocbq:
+	lpfc_free_iocb_list(phba);
+
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_free_sgl_list - Free a given sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ * @sglq_list: pointer to the head of sgl list.
+ *
+ * This routine is invoked to free a given sgl list and memory.
+ **/
+void
+lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+
+	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+	}
+}
+
+/**
+ * lpfc_free_els_sgl_list - Free els sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's els sgl list and memory.
+ **/
+static void
+lpfc_free_els_sgl_list(struct lpfc_hba *phba)
+{
+	LIST_HEAD(sglq_list);
+
+	/* Retrieve all els sgls from driver list */
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Now free the sgl list */
+	lpfc_free_sgl_list(phba, &sglq_list);
+}
+
+/**
+ * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's nvmet sgl list and memory.
+ **/
+static void
+lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	LIST_HEAD(sglq_list);
+
+	/* Retrieve all nvmet sgls from driver list */
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Now free the sgl list */
+	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+	}
+
+	/* Update the nvmet_xri_cnt to reflect no current sgls.
+	 * The next initialization cycle sets the count and allocates
+	 * the sgls over again.
+	 */
+	phba->sli4_hba.nvmet_xri_cnt = 0;
+}
+
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+	int size;
+	size = sizeof(struct lpfc_sglq *);
+	size *= phba->sli4_hba.max_cfg_param.max_xri;
+
+	phba->sli4_hba.lpfc_sglq_active_list =
+		kzalloc(size, GFP_KERNEL);
+	if (!phba->sli4_hba.lpfc_sglq_active_list)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a placeholder for now.
+ **/
+static void
+lpfc_free_active_sgl(struct lpfc_hba *phba)
+{
+	kfree(phba->sli4_hba.lpfc_sglq_active_list);
+}
+
+/**
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
+ *
+ **/
+static void
+lpfc_init_sgl_list(struct lpfc_hba *phba)
+{
+	/* Initialize and populate the sglq list per host/VF. */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+
+	/* els xri-sgl book keeping */
+	phba->sli4_hba.els_xri_cnt = 0;
+
+	/* scsi xri-buffer book keeping */
+	phba->sli4_hba.scsi_xri_cnt = 0;
+
+	/* nvme xri-buffer book keeping */
+	phba->sli4_hba.nvme_xri_cnt = 0;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * port for those SLI4 ports that do not support extents.  This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
+ * and should be called only when interrupts are disabled.
+ *
+ * Return codes
+ * 	0 - successful
+ *	-ERROR - otherwise.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return rc;
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
+
+	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+	if (!rpi_hdr) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0391 Error during rpi post operation\n");
+		lpfc_sli4_remove_rpis(phba);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and stores them in the phba.  This single region
+ * provides support for up to 64 rpis.  The region is used globally
+ * by the device.
+ *
+ * Returns:
+ *   A valid rpi hdr on success.
+ *   A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+	uint16_t rpi_limit, curr_rpi_range;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	/*
+	 * If the SLI4 port supports extents, posting the rpi header isn't
+	 * required.  Set the expected maximum count and let the actual value
+	 * get set when extents are fully allocated.
+	 */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return NULL;
+	if (phba->sli4_hba.extents_in_use)
+		return NULL;
+
+	/* The limit on the logical index is just the max_rpi count. */
+	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
+
+	spin_lock_irq(&phba->hbalock);
+	/*
+	 * Establish the starting RPI in this header block.  The starting
+	 * rpi is normalized to a zero base because the physical rpi is
+	 * port based.
+	 */
+	curr_rpi_range = phba->sli4_hba.next_rpi;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Reached full RPI range */
+	if (curr_rpi_range == rpi_limit)
+		return NULL;
+
+	/*
+	 * First allocate the protocol header region for the port.  The
+	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+	 */
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return NULL;
+
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+					   LPFC_HDR_TEMPLATE_SIZE,
+					   &dmabuf->phys, GFP_KERNEL);
+	if (!dmabuf->virt) {
+		rpi_hdr = NULL;
+		goto err_free_dmabuf;
+	}
+
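+	/*
+	 * The port requires the header template region to be 4KB aligned;
+	 * coherent allocations of this size are normally page aligned,
+	 * but verify defensively before handing the region to the port.
+	 */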
+	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+		rpi_hdr = NULL;
+		goto err_free_coherent;
+	}
+
+	/* Save the rpi header data for cleanup later. */
+	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+	if (!rpi_hdr)
+		goto err_free_coherent;
+
+	rpi_hdr->dmabuf = dmabuf;
+	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+	rpi_hdr->page_count = 1;
+	spin_lock_irq(&phba->hbalock);
+
+	/* The rpi_hdr stores the logical index only. */
+	rpi_hdr->start_rpi = curr_rpi_range;
+	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
+	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+	spin_unlock_irq(&phba->hbalock);
+	return rpi_hdr;
+
+ err_free_coherent:
+	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+			  dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+	kfree(dmabuf);
+	return NULL;
+}
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		goto exit;
+
+	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+		list_del(&rpi_hdr->list);
+		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+		kfree(rpi_hdr->dmabuf);
+		kfree(rpi_hdr);
+	}
+ exit:
+	/* There are no rpis available to the port now. */
+	phba->sli4_hba.next_rpi = 0;
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ *      pointer to @phba - successful
+ *      NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+	struct lpfc_hba *phba;
+
+	/* Allocate memory for HBA structure */
+	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+	if (!phba) {
+		dev_err(&pdev->dev, "failed to allocate hba struct\n");
+		return NULL;
+	}
+
+	/* Set reference to PCI device in HBA structure */
+	phba->pcidev = pdev;
+
+	/* Assign an unused board number */
+	phba->brd_no = lpfc_get_instance();
+	if (phba->brd_no < 0) {
+		kfree(phba);
+		return NULL;
+	}
+	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
+
+	spin_lock_init(&phba->ct_ev_lock);
+	INIT_LIST_HEAD(&phba->ct_ev_waiters);
+
+	return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+	/* Release the driver assigned board number */
+	idr_remove(&lpfc_hba_index, phba->brd_no);
+
+	/* Free memory allocated with sli3 rings */
+	kfree(phba->sli.sli3_ring);
+	phba->sli.sli3_ring = NULL;
+
+	kfree(phba);
+	return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *shost;
+
+	/* Initialize HBA FC structure */
+	phba->fc_edtov = FF_DEF_EDTOV;
+	phba->fc_ratov = FF_DEF_RATOV;
+	phba->fc_altov = FF_DEF_ALTOV;
+	phba->fc_arbtov = FF_DEF_ARBTOV;
+
+	atomic_set(&phba->sdev_cnt, 0);
+	atomic_set(&phba->fc4ScsiInputRequests, 0);
+	atomic_set(&phba->fc4ScsiOutputRequests, 0);
+	atomic_set(&phba->fc4ScsiControlRequests, 0);
+	atomic_set(&phba->fc4ScsiIoCmpls, 0);
+	atomic_set(&phba->fc4NvmeInputRequests, 0);
+	atomic_set(&phba->fc4NvmeOutputRequests, 0);
+	atomic_set(&phba->fc4NvmeControlRequests, 0);
+	atomic_set(&phba->fc4NvmeIoCmpls, 0);
+	atomic_set(&phba->fc4NvmeLsRequests, 0);
+	atomic_set(&phba->fc4NvmeLsCmpls, 0);
+	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+	if (!vport)
+		return -ENODEV;
+
+	shost = lpfc_shost_from_vport(vport);
+	phba->pport = vport;
+
+	if (phba->nvmet_support) {
+		/* Only 1 vport (pport) will support NVME target */
+		if (phba->txrdy_payload_pool == NULL) {
+			phba->txrdy_payload_pool = dma_pool_create(
+				"txrdy_pool", &phba->pcidev->dev,
+				TXRDY_PAYLOAD_LEN, 16, 0);
+			if (phba->txrdy_payload_pool) {
+				phba->targetport = NULL;
+				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+				lpfc_printf_log(phba, KERN_INFO,
+						LOG_INIT | LOG_NVME_DISC,
+						"6076 NVME Target Found\n");
+			}
+		}
+	}
+
+	lpfc_debugfs_initialize(vport);
+	/* Put reference to SCSI host to driver's device private data */
+	pci_set_drvdata(phba->pcidev, shost);
+
+	/*
+	 * At this point we are fully registered with PSA. In addition,
+	 * any initial discovery should be completed.
+	 */
+	vport->load_flag |= FC_ALLOW_FDMI;
+	if (phba->cfg_enable_SmartSAN ||
+	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
+
+		/* Setup appropriate attribute masks */
+		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
+		if (phba->cfg_enable_SmartSAN)
+			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
+		else
+			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to destroy HBA physical port and the associated
+ * SCSI host.
+ **/
+static void
+lpfc_destroy_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+
+	/* Destroy physical port that associated with the SCSI host */
+	destroy_port(vport);
+
+	return;
+}
+
+/**
+ * lpfc_setup_bg - Setup Block guard structures and debug areas.
+ * @phba: pointer to lpfc hba data structure.
+ * @shost: the shost to be used to detect Block guard settings.
+ *
+ * This routine sets up the local Block guard protocol settings for @shost.
+ * This routine also allocates memory for debugging bg buffers.
+ **/
+static void
+lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
+{
+	uint32_t old_mask;
+	uint32_t old_guard;
+
+	int pagecnt = 10;
+	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1478 Registering BlockGuard with the "
+				"SCSI layer\n");
+
+		old_mask = phba->cfg_prot_mask;
+		old_guard = phba->cfg_prot_guard;
+
+		/* Only allow supported values */
+		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+			SHOST_DIX_TYPE0_PROTECTION |
+			SHOST_DIX_TYPE1_PROTECTION);
+		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
+					 SHOST_DIX_GUARD_CRC);
+
+		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
+		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+			if ((old_mask != phba->cfg_prot_mask) ||
+				(old_guard != phba->cfg_prot_guard))
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1475 Registering BlockGuard with the "
+					"SCSI layer: mask %d  guard %d\n",
+					phba->cfg_prot_mask,
+					phba->cfg_prot_guard);
+
+			scsi_host_set_prot(shost, phba->cfg_prot_mask);
+			scsi_host_set_guard(shost, phba->cfg_prot_guard);
+		} else
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1479 Not Registering BlockGuard with the SCSI "
+				"layer, Bad protection parameters: %d %d\n",
+				old_mask, old_guard);
+	}
+
+	if (!_dump_buf_data) {
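+		/*
+		 * pagecnt is an allocation order, so each retry asks
+		 * __get_free_pages for half the previous buffer size.
+		 */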
+		while (pagecnt) {
+			spin_lock_init(&_dump_buf_lock);
+			_dump_buf_data =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_data) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9043 BLKGRD: allocated %d pages for "
+				       "_dump_buf_data at 0x%p\n",
+				       (1 << pagecnt), _dump_buf_data);
+				_dump_buf_data_order = pagecnt;
+				memset(_dump_buf_data, 0,
+				       ((1 << PAGE_SHIFT) << pagecnt));
+				break;
+			} else
+				--pagecnt;
+		}
+		if (!_dump_buf_data_order)
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9044 BLKGRD: ERROR unable to allocate "
+			       "memory for hexdump\n");
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
+		       "\n", _dump_buf_data);
+	if (!_dump_buf_dif) {
+		while (pagecnt) {
+			_dump_buf_dif =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_dif) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9046 BLKGRD: allocated %d pages for "
+				       "_dump_buf_dif at 0x%p\n",
+				       (1 << pagecnt), _dump_buf_dif);
+				_dump_buf_dif_order = pagecnt;
+				memset(_dump_buf_dif, 0,
+				       ((1 << PAGE_SHIFT) << pagecnt));
+				break;
+			} else
+				--pagecnt;
+		}
+		if (!_dump_buf_dif_order)
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9047 BLKGRD: ERROR unable to allocate "
+			       "memory for hexdump\n");
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
+		       _dump_buf_dif);
+}
+
+/**
+ * lpfc_post_init_setup - Perform necessary device post initialization setup.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to perform all the necessary post initialization
+ * setup for the device.
+ **/
+static void
+lpfc_post_init_setup(struct lpfc_hba *phba)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_adapter_event_header adapter_event;
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/*
+	 * hba setup may have changed the hba_queue_depth so we need to
+	 * adjust the value of can_queue.
+	 */
+	shost = pci_get_drvdata(phba->pcidev);
+	shost->can_queue = phba->cfg_hba_queue_depth - 10;
+	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+		lpfc_setup_bg(phba, shost);
+
+	lpfc_host_attrib_init(shost);
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		spin_lock_irq(shost->host_lock);
+		lpfc_poll_start_timer(phba);
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0428 Perform SCSI scan\n");
+	/* Send board arrival event to upper layer */
+	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(adapter_event),
+				  (char *) &adapter_event,
+				  LPFC_NL_VENDOR_ID);
+	return;
+}
+
+/**
+ * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-3 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar2map_len;
+	int i, hbq_count;
+	void *ptr;
+	int error = -ENODEV;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	else
+		pdev = phba->pcidev;
+
+	/* Set the device DMA mask size */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+			return error;
+		}
+	}
+
+	/* Get the bus address of Bar0 and Bar2 and the number of bytes
+	 * required by each mapping.
+	 */
+	phba->pci_bar0_map = pci_resource_start(pdev, 0);
+	bar0map_len = pci_resource_len(pdev, 0);
+
+	phba->pci_bar2_map = pci_resource_start(pdev, 2);
+	bar2map_len = pci_resource_len(pdev, 2);
+
+	/* Map HBA SLIM to a kernel virtual address. */
+	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+	if (!phba->slim_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLIM memory.\n");
+		goto out;
+	}
+
+	/* Map HBA Control Registers to a kernel virtual address. */
+	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+	if (!phba->ctrl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for HBA control registers.\n");
+		goto out_iounmap_slim;
+	}
+
+	/* Allocate memory for SLI-2 structures */
+	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+						&phba->slim2p.phys, GFP_KERNEL);
+	if (!phba->slim2p.virt)
+		goto out_iounmap;
+
+	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+	phba->mbox_ext = (phba->slim2p.virt +
+		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
+	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+	phba->IOCBs = (phba->slim2p.virt +
+		       offsetof(struct lpfc_sli2_slim, IOCBs));
+
+	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
+						 lpfc_sli_hbq_size(),
+						 &phba->hbqslimp.phys,
+						 GFP_KERNEL);
+	if (!phba->hbqslimp.virt)
+		goto out_free_slim;
+
+	hbq_count = lpfc_sli_hbq_count();
+	ptr = phba->hbqslimp.virt;
+	for (i = 0; i < hbq_count; ++i) {
+		phba->hbqs[i].hbq_virt = ptr;
+		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+		ptr += (lpfc_hbq_defs[i]->entry_count *
+			sizeof(struct lpfc_hbq_entry));
+	}
+	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
+	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
+
+	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
+	phba->MBslimaddr = phba->slim_memmap_p;
+	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+	return 0;
+
+out_free_slim:
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+out_iounmap:
+	iounmap(phba->ctrl_regs_memmap_p);
+out_iounmap_slim:
+	iounmap(phba->slim_memmap_p);
+out:
+	return error;
+}
+
+/**
+ * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+
+	/* Free coherent DMA memory allocated */
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* I/O memory unmap */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
+ * done and check status.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
+{
+	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
+	struct lpfc_register reg_data;
+	int i, port_error = 0;
+	uint32_t if_type;
+
+	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+	memset(&reg_data, 0, sizeof(reg_data));
+	if (!phba->sli4_hba.PSMPHRregaddr)
+		return -ENODEV;
+
+	/* Wait up to 30 seconds (3000 polls at 10 ms) for the SLI Port POST done and ready */
+	for (i = 0; i < 3000; i++) {
+		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+			&portsmphr_reg.word0) ||
+			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
+			/* Port has a fatal POST error, break out */
+			port_error = -ENODEV;
+			break;
+		}
+		if (LPFC_POST_STAGE_PORT_READY ==
+		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
+			break;
+		msleep(10);
+	}
+
+	/*
+	 * If there was a port error during POST, then don't proceed with
+	 * other register reads as the data may not be valid.  Just exit.
+	 */
+	if (port_error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"1408 Port Failed POST - portsmphr=0x%x, "
+			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
+			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
+			portsmphr_reg.word0,
+			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2534 Device Info: SLIFamily=0x%x, "
+				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
+				"SLIHint_2=0x%x, FT=0x%x\n",
+				bf_get(lpfc_sli_intf_sli_family,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_slirev,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_if_type,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_sli_hint1,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_sli_hint2,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_func_type,
+				       &phba->sli4_hba.sli_intf));
+		/*
+		 * Check for other Port errors during the initialization
+		 * process.  Fail the load if the port did not come up
+		 * correctly.
+		 */
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		switch (if_type) {
+		case LPFC_SLI_INTF_IF_TYPE_0:
+			phba->sli4_hba.ue_mask_lo =
+			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
+			phba->sli4_hba.ue_mask_hi =
+			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
+			uerrlo_reg.word0 =
+			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
+			uerrhi_reg.word0 =
+				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"1422 Unrecoverable Error "
+						"Detected during POST "
+						"uerr_lo_reg=0x%x, "
+						"uerr_hi_reg=0x%x, "
+						"ue_mask_lo_reg=0x%x, "
+						"ue_mask_hi_reg=0x%x\n",
+						uerrlo_reg.word0,
+						uerrhi_reg.word0,
+						phba->sli4_hba.ue_mask_lo,
+						phba->sli4_hba.ue_mask_hi);
+				port_error = -ENODEV;
+			}
+			break;
+		case LPFC_SLI_INTF_IF_TYPE_2:
+			/* Final checks.  The port status should be clean. */
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&reg_data.word0) ||
+				(bf_get(lpfc_sliport_status_err, &reg_data) &&
+				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
+				phba->work_status[0] =
+					readl(phba->sli4_hba.u.if_type2.
+					      ERR1regaddr);
+				phba->work_status[1] =
+					readl(phba->sli4_hba.u.if_type2.
+					      ERR2regaddr);
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2888 Unrecoverable port error "
+					"following POST: port status reg "
+					"0x%x, port_smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					reg_data.word0,
+					portsmphr_reg.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+				port_error = -ENODEV;
+			}
+			break;
+		case LPFC_SLI_INTF_IF_TYPE_1:
+		default:
+			break;
+		}
+	}
+	return port_error;
+}
+
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @if_type:  The SLI4 interface type getting configured.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
+{
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		phba->sli4_hba.u.if_type0.UERRLOregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
+		phba->sli4_hba.u.if_type0.UERRHIregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
+		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
+		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
+		phba->sli4_hba.SLIINTFregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		phba->sli4_hba.u.if_type2.EQDregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
+		phba->sli4_hba.u.if_type2.ERR1regaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_ER1_OFFSET;
+		phba->sli4_hba.u.if_type2.ERR2regaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_ER2_OFFSET;
+		phba->sli4_hba.u.if_type2.CTRLregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_CTL_OFFSET;
+		phba->sli4_hba.u.if_type2.STATUSregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_STA_OFFSET;
+		phba->sli4_hba.SLIINTFregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+		phba->sli4_hba.PSMPHRregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_SEM_OFFSET;
+		phba->sli4_hba.RQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_ULP0_RQ_DOORBELL;
+		phba->sli4_hba.WQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_ULP0_WQ_DOORBELL;
+		phba->sli4_hba.EQCQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
+		phba->sli4_hba.MQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
+		phba->sli4_hba.BMBXregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
+	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_SLIPORT_IF0_SMPHR;
+	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_HST_ISR0;
+	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_HST_IMR0;
+	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_HST_ISCR0;
+}
+
+/**
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
+ *
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given virtual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+static int
+lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
+{
+	if (vf > LPFC_VIR_FUNC_MAX)
+		return -ENODEV;
+
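+	/*
+	 * Each virtual function owns its own LPFC_VFR_PAGE_SIZE region of
+	 * doorbell registers in BAR2; the per-queue doorbell offsets below
+	 * are constant within that per-VF page.
+	 */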
+	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE +
+					LPFC_ULP0_RQ_DOORBELL);
+	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE +
+					LPFC_ULP0_WQ_DOORBELL);
+	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
+	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
+	return 0;
+}
+
+/**
+ * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create the bootstrap mailbox
+ * region consistent with the SLI-4 interface spec.  This
+ * routine allocates all memory necessary to communicate
+ * mailbox commands to the port and sets up all alignment
+ * needs.  No locks are expected to be held when calling
+ * this routine.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
+{
+	uint32_t bmbx_size;
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address *dma_address;
+	uint32_t pa_addr;
+	uint64_t phys_addr;
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	/*
+	 * The bootstrap mailbox region consists of two parts
+	 * plus an alignment restriction of 16 bytes.
+	 */
+	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
+					   &dmabuf->phys, GFP_KERNEL);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Initialize the bootstrap mailbox pointers now so that the register
+	 * operations are simple later.  The mailbox dma address is required
+	 * to be 16-byte aligned.  Also align the virtual memory as each
+	 * mailbox is copied into the bmbx mailbox region before issuing the
+	 * command to the port.
+	 */
+	phba->sli4_hba.bmbx.dmabuf = dmabuf;
+	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+					      LPFC_ALIGN_16_BYTE);
+	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+					      LPFC_ALIGN_16_BYTE);
+
+	/*
+	 * Set the high and low physical addresses now.  The SLI4 alignment
+	 * requirement is 16 bytes and the mailbox is posted to the port
+	 * as two 30-bit addresses.  The other data is a bit marking whether
+	 * the 30-bit address is the high or low address.
+	 * Upcast bmbx aphys to 64bits so shift instruction compiles
+	 * clean on 32 bit machines.
+	 */
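+	/*
+	 * Concretely, the shifts below place bits 4-33 of aphys in addr_lo
+	 * and bits 34-63 in addr_hi, each left-shifted by two so bit 1 can
+	 * flag whether the word carries the high or the low half.
+	 */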
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_HI);
+
+	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_LO);
+	return 0;
+}
+
+/**
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to tear down the bootstrap mailbox
+ * region and release all host resources. It requires the caller
+ * to ensure that all mailbox commands have been recovered, that no
+ * additional mailbox commands are sent, and that interrupts are
+ * disabled before calling this routine.
+ *
+ **/
+static void
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
+{
+	dma_free_coherent(&phba->pcidev->dev,
+			  phba->sli4_hba.bmbx.bmbx_size,
+			  phba->sli4_hba.bmbx.dmabuf->virt,
+			  phba->sli4_hba.bmbx.dmabuf->phys);
+
+	kfree(phba->sli4_hba.bmbx.dmabuf);
+	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
+}
+
+/**
+ * lpfc_sli4_read_config - Get the config parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
+ * allocation for the port.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_read_config(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb;
+	struct lpfc_mbx_read_config *rd_config;
+	union  lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct lpfc_mbx_get_func_cfg *get_func_cfg;
+	struct lpfc_rsrc_desc_fcfcoe *desc;
+	char *pdesc_0;
+	uint16_t forced_link_speed;
+	uint32_t if_type;
+	int length, i, rc = 0, rc2;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2011 Unable to allocate memory for issuing "
+				"READ_CONFIG mailbox command\n");
+		return -ENOMEM;
+	}
+
+	lpfc_read_config(phba, pmb);
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2012 Mailbox failed, mbxCmd x%x "
+			"READ_CONFIG, mbxStatus x%x\n",
+			bf_get(lpfc_mqe_command, &pmb->u.mqe),
+			bf_get(lpfc_mqe_status, &pmb->u.mqe));
+		rc = -EIO;
+	} else {
+		rd_config = &pmb->u.mqe.un.rd_config;
+		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+			phba->sli4_hba.lnk_info.lnk_tp =
+				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+			phba->sli4_hba.lnk_info.lnk_no =
+				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"3081 lnk_type:%d, lnk_numb:%d\n",
+					phba->sli4_hba.lnk_info.lnk_tp,
+					phba->sli4_hba.lnk_info.lnk_no);
+		} else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3082 Mailbox (x%x) returned ldv:x0\n",
+					bf_get(lpfc_mqe_command, &pmb->u.mqe));
+		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
+			phba->bbcredit_support = 1;
+			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
+		}
+
+		phba->sli4_hba.extents_in_use =
+			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+		phba->sli4_hba.max_cfg_param.max_xri =
+			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		phba->sli4_hba.max_cfg_param.xri_base =
+			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_vpi =
+			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		/* Limit the max we support */
+		if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
+			phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
+		phba->sli4_hba.max_cfg_param.vpi_base =
+			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_rpi =
+			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.rpi_base =
+			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_vfi =
+			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.vfi_base =
+			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_fcfi =
+			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_eq =
+			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_rq =
+			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_wq =
+			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_cq =
+			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
+		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
+		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
+		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
+		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
+		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
+				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
+		phba->max_vports = phba->max_vpi;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2003 cfg params Extents? %d "
+				"XRI(B:%d M:%d), "
+				"VPI(B:%d M:%d) "
+				"VFI(B:%d M:%d) "
+				"RPI(B:%d M:%d) "
+				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
+				phba->sli4_hba.extents_in_use,
+				phba->sli4_hba.max_cfg_param.xri_base,
+				phba->sli4_hba.max_cfg_param.max_xri,
+				phba->sli4_hba.max_cfg_param.vpi_base,
+				phba->sli4_hba.max_cfg_param.max_vpi,
+				phba->sli4_hba.max_cfg_param.vfi_base,
+				phba->sli4_hba.max_cfg_param.max_vfi,
+				phba->sli4_hba.max_cfg_param.rpi_base,
+				phba->sli4_hba.max_cfg_param.max_rpi,
+				phba->sli4_hba.max_cfg_param.max_fcfi,
+				phba->sli4_hba.max_cfg_param.max_eq,
+				phba->sli4_hba.max_cfg_param.max_cq,
+				phba->sli4_hba.max_cfg_param.max_wq,
+				phba->sli4_hba.max_cfg_param.max_rq);
+
+	}
+
+	if (rc)
+		goto read_cfg_out;
+
+	/* Update link speed if forced link speed is supported */
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		forced_link_speed =
+			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
+		if (forced_link_speed) {
+			phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+
+			switch (forced_link_speed) {
+			case LINK_SPEED_1G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_1G;
+				break;
+			case LINK_SPEED_2G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_2G;
+				break;
+			case LINK_SPEED_4G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_4G;
+				break;
+			case LINK_SPEED_8G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_8G;
+				break;
+			case LINK_SPEED_10G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_10G;
+				break;
+			case LINK_SPEED_16G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_16G;
+				break;
+			case LINK_SPEED_32G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_32G;
+				break;
+			case 0xffff:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_AUTO;
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0047 Unrecognized link "
+						"speed : %d\n",
+						forced_link_speed);
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_AUTO;
+			}
+		}
+	}
+
+	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
+	length = phba->sli4_hba.max_cfg_param.max_xri -
+			lpfc_sli4_get_els_iocb_cnt(phba);
+	if (phba->cfg_hba_queue_depth > length) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3361 HBA queue depth changed from %d to %d\n",
+				phba->cfg_hba_queue_depth, length);
+		phba->cfg_hba_queue_depth = length;
+	}
+
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		goto read_cfg_out;
+
+	/* get the pf# and vf# for SLI4 if_type 2 port */
+	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc2 || shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3026 Mailbox failed, mbxCmd x%x "
+				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+				bf_get(lpfc_mqe_command, &pmb->u.mqe),
+				bf_get(lpfc_mqe_status, &pmb->u.mqe));
+		goto read_cfg_out;
+	}
+
+	/* search for the fc_fcoe resource descriptor */
+	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+
+	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
+	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
+	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
+	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
+		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
+	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
+		goto read_cfg_out;
+
+	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
+		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
+			phba->sli4_hba.iov.pf_number =
+				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+			phba->sli4_hba.iov.vf_number =
+				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+			break;
+		}
+	}
+
+	if (i < LPFC_RSRC_DESC_MAX_NUM)
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+				phba->sli4_hba.iov.vf_number);
+	else
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3028 GET_FUNCTION_CONFIG: failed to find "
+				"Resource Descriptor:x%x\n",
+				LPFC_RSRC_DESC_TYPE_FCFCOE);
+
+read_cfg_out:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to setup the port-side endian order when
+ * the port if_type is 0.  This routine has no function for other
+ * if_types.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_setup_endian_order(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t if_type, rc = 0;
+	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
+				      HOST_ENDIAN_HIGH_WORD1};
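+	/*
+	 * Presumably the port infers the host byte order from how these two
+	 * well-known words arrive in the SLI4_CONFIG_SPECIAL mailbox.
+	 */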
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+		if (!mboxq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0492 Unable to allocate memory for "
+					"issuing SLI_CONFIG_SPECIAL mailbox "
+					"command\n");
+			return -ENOMEM;
+		}
+
+		/*
+		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
+		 * two words to contain special data values and no other data.
+		 */
+		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0493 SLI_CONFIG_SPECIAL mailbox "
+					"failed with status x%x\n",
+					rc);
+			rc = -EIO;
+		}
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_queue_verify - Verify and update EQ counts
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to check the user settable queue counts for EQs.
+ * After this routine is called the counts will be set to valid values that
+ * adhere to the constraints of the system's interrupt vectors and the port's
+ * queue resources.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_queue_verify(struct lpfc_hba *phba)
+{
+	int io_channel;
+	int fof_vectors = phba->cfg_fof ? 1 : 0;
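+	/*
+	 * Flash Optimized Fabric support, when enabled, consumes one
+	 * interrupt vector / EQ in addition to the IO channels.
+	 */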
+
+	/*
+	 * Sanity check for configured queue parameters against the run-time
+	 * device parameters
+	 */
+
+	/* Sanity check on HBA EQ parameters */
+	io_channel = phba->io_channel_irqs;
+
+	if (phba->sli4_hba.num_online_cpu < io_channel) {
+		lpfc_printf_log(phba,
+				KERN_ERR, LOG_INIT,
+				"3188 Reducing IO channels to match number of "
+				"online CPUs: from %d to %d\n",
+				io_channel, phba->sli4_hba.num_online_cpu);
+		io_channel = phba->sli4_hba.num_online_cpu;
+	}
+
+	if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2575 Reducing IO channels to match number of "
+				"available EQs: from %d to %d\n",
+				io_channel,
+				phba->sli4_hba.max_cfg_param.max_eq);
+		io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
+	}
+
+	/* The actual number of FCP / NVME event queues adopted */
+	if (io_channel != phba->io_channel_irqs)
+		phba->io_channel_irqs = io_channel;
+	if (phba->cfg_fcp_io_channel > io_channel)
+		phba->cfg_fcp_io_channel = io_channel;
+	if (phba->cfg_nvme_io_channel > io_channel)
+		phba->cfg_nvme_io_channel = io_channel;
+	if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+		phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
+			phba->io_channel_irqs, phba->cfg_fcp_io_channel,
+			phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
+
+	/* Get EQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+
+	/* Get CQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+	return 0;
+}
+
+static int
+lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
+{
+	struct lpfc_queue *qdesc;
+	int cnt;
+
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					    phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0508 Failed allocate fast-path NVME CQ (%d)\n",
+				wqidx);
+		return 1;
+	}
+	phba->sli4_hba.nvme_cq[wqidx] = qdesc;
+
+	cnt = LPFC_NVME_WQSIZE;
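+	/* NVME work queues always use the 128-byte WQE format */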
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0509 Failed allocate fast-path NVME WQ (%d)\n",
+				wqidx);
+		return 1;
+	}
+	phba->sli4_hba.nvme_wq[wqidx] = qdesc;
+	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+	return 0;
+}
+
+static int
+lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
+{
+	struct lpfc_queue *qdesc;
+	uint32_t wqesize;
+
+	/* Create Fast Path FCP CQs */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
+		return 1;
+	}
+	phba->sli4_hba.fcp_cq[wqidx] = qdesc;
+
+	/* Create Fast Path FCP WQs */
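+	/* 128-byte WQEs are required when the FCP command is embedded */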
+	wqesize = (phba->fcp_embed_io) ?
+		LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+	qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0503 Failed allocate fast-path FCP WQ (%d)\n",
+				wqidx);
+		return 1;
+	}
+	phba->sli4_hba.fcp_wq[wqidx] = qdesc;
+	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+	return 0;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as place holder.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+	int idx, io_channel;
+
+	/*
+	 * Create HBA Record arrays.
+	 * Both NVME and FCP share the same vectors / EQs.
+	 */
+	io_channel = phba->io_channel_irqs;
+	if (!io_channel)
+		return -ERANGE;
+
+	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+	phba->sli4_hba.hba_eq =  kcalloc(io_channel,
+					sizeof(struct lpfc_queue *),
+					GFP_KERNEL);
+	if (!phba->sli4_hba.hba_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2576 Failed allocate memory for "
+			"fast-path EQ record array\n");
+		goto out_error;
+	}
+
+	if (phba->cfg_fcp_io_channel) {
+		phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
+						sizeof(struct lpfc_queue *),
+						GFP_KERNEL);
+		if (!phba->sli4_hba.fcp_cq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2577 Failed allocate memory for "
+					"fast-path CQ record array\n");
+			goto out_error;
+		}
+		phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
+						sizeof(struct lpfc_queue *),
+						GFP_KERNEL);
+		if (!phba->sli4_hba.fcp_wq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2578 Failed allocate memory for "
+					"fast-path FCP WQ record array\n");
+			goto out_error;
+		}
+		/*
+		 * Since the first EQ can have multiple CQs associated with it,
+		 * this array is used to quickly see if we have an FCP fast-path
+		 * CQ match.
+		 */
+		phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
+							sizeof(uint16_t),
+							GFP_KERNEL);
+		if (!phba->sli4_hba.fcp_cq_map) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2545 Failed allocate memory for "
+					"fast-path CQ map\n");
+			goto out_error;
+		}
+	}
+
+	if (phba->cfg_nvme_io_channel) {
+		phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
+						sizeof(struct lpfc_queue *),
+						GFP_KERNEL);
+		if (!phba->sli4_hba.nvme_cq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6077 Failed allocate memory for "
+					"fast-path CQ record array\n");
+			goto out_error;
+		}
+
+		phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
+						sizeof(struct lpfc_queue *),
+						GFP_KERNEL);
+		if (!phba->sli4_hba.nvme_wq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2581 Failed allocate memory for "
+					"fast-path NVME WQ record array\n");
+			goto out_error;
+		}
+
+		/*
+		 * Since the first EQ can have multiple CQs associated with it,
+		 * this array is used to quickly see if we have an NVME fast-path
+		 * CQ match.
+		 */
+		phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
+							sizeof(uint16_t),
+							GFP_KERNEL);
+		if (!phba->sli4_hba.nvme_cq_map) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6078 Failed allocate memory for "
+					"fast-path CQ map\n");
+			goto out_error;
+		}
+
+		if (phba->nvmet_support) {
+			phba->sli4_hba.nvmet_cqset = kcalloc(
+					phba->cfg_nvmet_mrq,
+					sizeof(struct lpfc_queue *),
+					GFP_KERNEL);
+			if (!phba->sli4_hba.nvmet_cqset) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3121 Failed allocate memory for "
+					"fast-path CQ set array\n");
+				goto out_error;
+			}
+			phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
+					phba->cfg_nvmet_mrq,
+					sizeof(struct lpfc_queue *),
+					GFP_KERNEL);
+			if (!phba->sli4_hba.nvmet_mrq_hdr) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3122 Failed allocate memory for "
+					"fast-path RQ set hdr array\n");
+				goto out_error;
+			}
+			phba->sli4_hba.nvmet_mrq_data = kcalloc(
+					phba->cfg_nvmet_mrq,
+					sizeof(struct lpfc_queue *),
+					GFP_KERNEL);
+			if (!phba->sli4_hba.nvmet_mrq_data) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3124 Failed allocate memory for "
+					"fast-path RQ set data array\n");
+				goto out_error;
+			}
+		}
+	}
+
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+
+	/* Create HBA Event Queues (EQs) */
+	for (idx = 0; idx < io_channel; idx++) {
+		/* Create EQs */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+					      phba->sli4_hba.eq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0497 Failed allocate EQ (%d)\n", idx);
+			goto out_error;
+		}
+		phba->sli4_hba.hba_eq[idx] = qdesc;
+	}
+
+	/* FCP and NVME io channels are not required to be balanced */
+
+	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+		if (lpfc_alloc_fcp_wq_cq(phba, idx))
+			goto out_error;
+
+	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+		if (lpfc_alloc_nvme_wq_cq(phba, idx))
+			goto out_error;
+
+	if (phba->nvmet_support) {
+		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+			qdesc = lpfc_sli4_queue_alloc(phba,
+					phba->sli4_hba.cq_esize,
+					phba->sli4_hba.cq_ecount);
+			if (!qdesc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3142 Failed allocate NVME "
+					"CQ Set (%d)\n", idx);
+				goto out_error;
+			}
+			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
+		}
+	}
+
+	/*
+	 * Create Slow Path Completion Queues (CQs)
+	 */
+
+	/* Create slow-path Mailbox Command Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0500 Failed allocate slow-path mailbox CQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.mbx_cq = qdesc;
+
+	/* Create slow-path ELS Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0501 Failed allocate slow-path ELS CQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.els_cq = qdesc;
+
+
+	/*
+	 * Create Slow Path Work Queues (WQs)
+	 */
+
+	/* Create Mailbox Command Queue */
+
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+				      phba->sli4_hba.mq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0505 Failed allocate slow-path MQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.mbx_wq = qdesc;
+
+	/*
+	 * Create ELS Work Queues
+	 */
+
+	/* Create slow-path ELS Work Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+				      phba->sli4_hba.wq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0504 Failed allocate slow-path ELS WQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.els_wq = qdesc;
+	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		/* Create NVME LS Complete Queue */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					      phba->sli4_hba.cq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6079 Failed allocate NVME LS CQ\n");
+			goto out_error;
+		}
+		phba->sli4_hba.nvmels_cq = qdesc;
+
+		/* Create NVME LS Work Queue */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6080 Failed allocate NVME LS WQ\n");
+			goto out_error;
+		}
+		phba->sli4_hba.nvmels_wq = qdesc;
+		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+	}
+
+	/*
+	 * Create Receive Queue (RQ)
+	 */
+
+	/* Create Receive Queue for header */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+				      phba->sli4_hba.rq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0506 Failed allocate receive HRQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.hdr_rq = qdesc;
+
+	/* Create Receive Queue for data */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+				      phba->sli4_hba.rq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0507 Failed allocate receive DRQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.dat_rq = qdesc;
+
+	if (phba->nvmet_support) {
+		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+			/* Create NVMET Receive Queue for header */
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      phba->sli4_hba.rq_esize,
+						      LPFC_NVMET_RQE_DEF_COUNT);
+			if (!qdesc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3146 Failed allocate "
+						"receive HRQ\n");
+				goto out_error;
+			}
+			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
+
+			/* Only needed for header of RQ pair */
+			qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
+					      GFP_KERNEL);
+			if (qdesc->rqbp == NULL) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6131 Failed allocate "
+						"Header RQBP\n");
+				goto out_error;
+			}
+
+			/* Put list in known state in case driver load fails. */
+			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
+
+			/* Create NVMET Receive Queue for data */
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      phba->sli4_hba.rq_esize,
+						      LPFC_NVMET_RQE_DEF_COUNT);
+			if (!qdesc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3156 Failed allocate "
+						"receive DRQ\n");
+				goto out_error;
+			}
+			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
+		}
+	}
+
+	/* Create the Queues needed for Flash Optimized Fabric operations */
+	if (phba->cfg_fof)
+		lpfc_fof_queue_create(phba);
+	return 0;
+
+out_error:
+	lpfc_sli4_queue_destroy(phba);
+	return -ENOMEM;
+}
+
+static inline void
+__lpfc_sli4_release_queue(struct lpfc_queue **qp)
+{
+	if (*qp != NULL) {
+		lpfc_sli4_queue_free(*qp);
+		*qp = NULL;
+	}
+}
+
+static inline void
+lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
+{
+	int idx;
+
+	if (*qs == NULL)
+		return;
+
+	for (idx = 0; idx < max; idx++)
+		__lpfc_sli4_release_queue(&(*qs)[idx]);
+
+	kfree(*qs);
+	*qs = NULL;
+}
+
+static inline void
+lpfc_sli4_release_queue_map(uint16_t **qmap)
+{
+	if (*qmap != NULL) {
+		kfree(*qmap);
+		*qmap = NULL;
+	}
+}
+
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues allocated for the
+ * FCoE HBA operation.
+ **/
+void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+	if (phba->cfg_fof)
+		lpfc_fof_queue_destroy(phba);
+
+	/* Release HBA eqs */
+	lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
+
+	/* Release FCP cqs */
+	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
+				 phba->cfg_fcp_io_channel);
+
+	/* Release FCP wqs */
+	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
+				 phba->cfg_fcp_io_channel);
+
+	/* Release FCP CQ mapping array */
+	lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
+
+	/* Release NVME cqs */
+	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
+					phba->cfg_nvme_io_channel);
+
+	/* Release NVME wqs */
+	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
+					phba->cfg_nvme_io_channel);
+
+	/* Release NVME CQ mapping array */
+	lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+
+	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+					phba->cfg_nvmet_mrq);
+
+	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+					phba->cfg_nvmet_mrq);
+	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+					phba->cfg_nvmet_mrq);
+
+	/* Release mailbox command work queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
+
+	/* Release ELS work queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
+
+	/* Release ELS work queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
+
+	/* Release unsolicited receive queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
+	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
+
+	/* Release ELS complete queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
+
+	/* Release NVME LS complete queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
+
+	/* Release mailbox command complete queue */
+	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
+
+	/* Everything on this list has been freed */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+}
+
+int
+lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
+{
+	struct lpfc_rqb *rqbp;
+	struct lpfc_dmabuf *h_buf;
+	struct rqb_dmabuf *rqb_buffer;
+
+	rqbp = rq->rqbp;
+	while (!list_empty(&rqbp->rqb_buffer_list)) {
+		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
+				 struct lpfc_dmabuf, list);
+
+		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
+		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
+		rqbp->buffer_count--;
+	}
+	return 1;
+}
+
+static int
+lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+	struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
+	int qidx, uint32_t qtype)
+{
+	struct lpfc_sli_ring *pring;
+	int rc;
+
+	if (!eq || !cq || !wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"6085 Fast-path %s (%d) not allocated\n",
+			((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
+		return -ENOMEM;
+	}
+
+	/* create the Cq first */
+	rc = lpfc_cq_create(phba, cq, eq,
+			(qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"6086 Failed setup of CQ (%d), rc = 0x%x\n",
+			qidx, (uint32_t)rc);
+		return rc;
+	}
+
+	if (qtype != LPFC_MBOX) {
+		/* Setup nvme_cq_map for fast lookup */
+		if (cq_map)
+			*cq_map = cq->queue_id;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
+			qidx, cq->queue_id, qidx, eq->queue_id);
+
+		/* create the wq */
+		rc = lpfc_wq_create(phba, wq, cq, qtype);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6123 Failed setup of fastpath WQ (%d), rc = 0x%x\n",
+				qidx, (uint32_t)rc);
+			/* no need to tear down cq - caller will do so */
+			return rc;
+		}
+
+		/* Bind this CQ/WQ to the NVME ring */
+		pring = wq->pring;
+		pring->sli.sli4.wqp = (void *)wq;
+		cq->pring = pring;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
+			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
+	} else {
+		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0539 Failed setup of slow-path MQ: "
+				"rc = 0x%x\n", rc);
+			/* no need to tear down cq - caller will do so */
+			return rc;
+		}
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+			phba->sli4_hba.mbx_wq->queue_id,
+			phba->sli4_hba.mbx_cq->queue_id);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+{
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	LPFC_MBOXQ_t *mboxq;
+	int qidx;
+	uint32_t length, io_channel;
+	int rc = -ENOMEM;
+
+	/* Check for dual-ULP support */
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3249 Unable to allocate memory for "
+				"QUERY_FW_CFG mailbox command\n");
+		return -ENOMEM;
+	}
+	length = (sizeof(struct lpfc_mbx_query_fw_config) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+	shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3250 QUERY_FW_CFG mailbox failed with status "
+				"x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mboxq, phba->mbox_mem_pool);
+		rc = -ENXIO;
+		goto out_error;
+	}
+
+	phba->sli4_hba.fw_func_mode =
+			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
+	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
+	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+	phba->sli4_hba.physical_port =
+			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
+			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+
+	/*
+	 * Set up HBA Event Queues (EQs)
+	 */
+	io_channel = phba->io_channel_irqs;
+
+	/* Set up HBA event queue */
+	if (io_channel && !phba->sli4_hba.hba_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3147 Fast-path EQs not allocated\n");
+		rc = -ENOMEM;
+		goto out_error;
+	}
+	for (qidx = 0; qidx < io_channel; qidx++) {
+		if (!phba->sli4_hba.hba_eq[qidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0522 Fast-path EQ (%d) not "
+					"allocated\n", qidx);
+			rc = -ENOMEM;
+			goto out_destroy;
+		}
+		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
+						phba->cfg_fcp_imax);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0523 Failed setup of fast-path EQ "
+					"(%d), rc = 0x%x\n", qidx,
+					(uint32_t)rc);
+			goto out_destroy;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2584 HBA EQ setup: queue[%d]-id=%d\n",
+				qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
+	}
+
+	if (phba->cfg_nvme_io_channel) {
+		if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6084 Fast-path NVME %s array not allocated\n",
+				(phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
+			rc = -ENOMEM;
+			goto out_destroy;
+		}
+
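+		/*
+		 * Associate the CQ/WQ pairs with the EQs round-robin
+		 * (qidx % io_channel) so completions are spread across
+		 * all interrupt vectors.
+		 */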
+		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+			rc = lpfc_create_wq_cq(phba,
+					phba->sli4_hba.hba_eq[
+						qidx % io_channel],
+					phba->sli4_hba.nvme_cq[qidx],
+					phba->sli4_hba.nvme_wq[qidx],
+					&phba->sli4_hba.nvme_cq_map[qidx],
+					qidx, LPFC_NVME);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6123 Failed to setup fastpath "
+					"NVME WQ/CQ (%d), rc = 0x%x\n",
+					qidx, (uint32_t)rc);
+				goto out_destroy;
+			}
+		}
+	}
+
+	if (phba->cfg_fcp_io_channel) {
+		/* Set up fast-path FCP Response Complete Queue */
+		if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3148 Fast-path FCP %s array not allocated\n",
+				phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
+			rc = -ENOMEM;
+			goto out_destroy;
+		}
+
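+		/* Same round-robin EQ association as the NVME queues above */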
+		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
+			rc = lpfc_create_wq_cq(phba,
+					phba->sli4_hba.hba_eq[
+						qidx % io_channel],
+					phba->sli4_hba.fcp_cq[qidx],
+					phba->sli4_hba.fcp_wq[qidx],
+					&phba->sli4_hba.fcp_cq_map[qidx],
+					qidx, LPFC_FCP);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0535 Failed to setup fastpath "
+					"FCP WQ/CQ (%d), rc = 0x%x\n",
+					qidx, (uint32_t)rc);
+				goto out_destroy;
+			}
+		}
+	}
+
+	/*
+	 * Set up Slow Path Complete Queues (CQs)
+	 */
+
+	/* Set up slow-path MBOX CQ/MQ */
+
+	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0528 %s not allocated\n",
+				phba->sli4_hba.mbx_cq ?
+				"Mailbox WQ" : "Mailbox CQ");
+		rc = -ENOMEM;
+		goto out_destroy;
+	}
+
+	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+			       phba->sli4_hba.mbx_cq,
+			       phba->sli4_hba.mbx_wq,
+			       NULL, 0, LPFC_MBOX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
+			(uint32_t)rc);
+		goto out_destroy;
+	}
+	if (phba->nvmet_support) {
+		if (!phba->sli4_hba.nvmet_cqset) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3165 Fast-path NVME CQ Set "
+					"array not allocated\n");
+			rc = -ENOMEM;
+			goto out_destroy;
+		}
+		if (phba->cfg_nvmet_mrq > 1) {
+			rc = lpfc_cq_create_set(phba,
+					phba->sli4_hba.nvmet_cqset,
+					phba->sli4_hba.hba_eq,
+					LPFC_WCQ, LPFC_NVMET);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3164 Failed setup of NVME CQ "
+						"Set, rc = 0x%x\n",
+						(uint32_t)rc);
+				goto out_destroy;
+			}
+		} else {
+			/* Set up NVMET Receive Complete Queue */
+			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
+					    phba->sli4_hba.hba_eq[0],
+					    LPFC_WCQ, LPFC_NVMET);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6089 Failed setup NVMET CQ: "
+						"rc = 0x%x\n", (uint32_t)rc);
+				goto out_destroy;
+			}
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"6090 NVMET CQ setup: cq-id=%d, "
+					"parent eq-id=%d\n",
+					phba->sli4_hba.nvmet_cqset[0]->queue_id,
+					phba->sli4_hba.hba_eq[0]->queue_id);
+		}
+	}
+
+	/* Set up slow-path ELS WQ/CQ */
+	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0530 ELS %s not allocated\n",
+				phba->sli4_hba.els_cq ? "WQ" : "CQ");
+		rc = -ENOMEM;
+		goto out_destroy;
+	}
+	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+					phba->sli4_hba.els_cq,
+					phba->sli4_hba.els_wq,
+					NULL, 0, LPFC_ELS);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
+			(uint32_t)rc);
+		goto out_destroy;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
+			phba->sli4_hba.els_wq->queue_id,
+			phba->sli4_hba.els_cq->queue_id);
+
+	if (phba->cfg_nvme_io_channel) {
+		/* Set up NVME LS Complete Queue */
+		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6091 LS %s not allocated\n",
+					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
+			rc = -ENOMEM;
+			goto out_destroy;
+		}
+		rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+					phba->sli4_hba.nvmels_cq,
+					phba->sli4_hba.nvmels_wq,
+					NULL, 0, LPFC_NVME_LS);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0529 Failed setup of NVME LS WQ/CQ: "
+				"rc = 0x%x\n", (uint32_t)rc);
+			goto out_destroy;
+		}
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"6096 NVME LS WQ setup: wq-id=%d, "
+				"parent cq-id=%d\n",
+				phba->sli4_hba.nvmels_wq->queue_id,
+				phba->sli4_hba.nvmels_cq->queue_id);
+	}
+
+	/*
+	 * Create NVMET Receive Queue (RQ)
+	 */
+	if (phba->nvmet_support) {
+		if ((!phba->sli4_hba.nvmet_cqset) ||
+		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
+		    (!phba->sli4_hba.nvmet_mrq_data)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6130 MRQ CQ Queues not "
+					"allocated\n");
+			rc = -ENOMEM;
+			goto out_destroy;
+		}
+		if (phba->cfg_nvmet_mrq > 1) {
+			rc = lpfc_mrq_create(phba,
+					     phba->sli4_hba.nvmet_mrq_hdr,
+					     phba->sli4_hba.nvmet_mrq_data,
+					     phba->sli4_hba.nvmet_cqset,
+					     LPFC_NVMET);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6098 Failed setup of NVMET "
+						"MRQ: rc = 0x%x\n",
+						(uint32_t)rc);
+				goto out_destroy;
+			}
+
+		} else {
+			rc = lpfc_rq_create(phba,
+					    phba->sli4_hba.nvmet_mrq_hdr[0],
+					    phba->sli4_hba.nvmet_mrq_data[0],
+					    phba->sli4_hba.nvmet_cqset[0],
+					    LPFC_NVMET);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6057 Failed setup of NVMET "
+						"Receive Queue: rc = 0x%x\n",
+						(uint32_t)rc);
+				goto out_destroy;
+			}
+
+			lpfc_printf_log(
+				phba, KERN_INFO, LOG_INIT,
+				"6099 NVMET RQ setup: hdr-rq-id=%d, "
+				"dat-rq-id=%d parent cq-id=%d\n",
+				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
+				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
+				phba->sli4_hba.nvmet_cqset[0]->queue_id);
+
+		}
+	}
+
+	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0540 Receive Queue not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy;
+	}
+
+	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+			    phba->sli4_hba.els_cq, LPFC_USOL);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0541 Failed setup of Receive Queue: "
+				"rc = 0x%x\n", (uint32_t)rc);
+		goto out_destroy;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
+			"parent cq-id=%d\n",
+			phba->sli4_hba.hdr_rq->queue_id,
+			phba->sli4_hba.dat_rq->queue_id,
+			phba->sli4_hba.els_cq->queue_id);
+
+	if (phba->cfg_fof) {
+		rc = lpfc_fof_queue_setup(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0549 Failed setup of FOF Queues: "
+					"rc = 0x%x\n", rc);
+			goto out_destroy;
+		}
+	}
+
+	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
+		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 phba->cfg_fcp_imax);
+
+	return 0;
+
+out_destroy:
+	lpfc_sli4_queue_unset(phba);
+out_error:
+	return rc;
+}
+
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset all the SLI4 queues that were set up for
+ * the FCoE HBA operation.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+	int qidx;
+
+	/* Unset the queues created for Flash Optimized Fabric operations */
+	if (phba->cfg_fof)
+		lpfc_fof_queue_destroy(phba);
+
+	/* Unset mailbox command work queue */
+	if (phba->sli4_hba.mbx_wq)
+		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+
+	/* Unset NVME LS work queue */
+	if (phba->sli4_hba.nvmels_wq)
+		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
+
+	/* Unset ELS work queue */
+	if (phba->sli4_hba.els_wq)
+		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+
+	/* Unset unsolicited receive queue */
+	if (phba->sli4_hba.hdr_rq)
+		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
+				phba->sli4_hba.dat_rq);
+
+	/* Unset FCP work queue */
+	if (phba->sli4_hba.fcp_wq)
+		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
+			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
+
+	/* Unset NVME work queue */
+	if (phba->sli4_hba.nvme_wq) {
+		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
+			lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
+	}
+
+	/* Unset mailbox command complete queue */
+	if (phba->sli4_hba.mbx_cq)
+		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+
+	/* Unset ELS complete queue */
+	if (phba->sli4_hba.els_cq)
+		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+
+	/* Unset NVME LS complete queue */
+	if (phba->sli4_hba.nvmels_cq)
+		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
+
+	/* Unset NVME response complete queue */
+	if (phba->sli4_hba.nvme_cq)
+		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
+			lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
+
+	/* Unset NVMET MRQ queue */
+	if (phba->sli4_hba.nvmet_mrq_hdr) {
+		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+			lpfc_rq_destroy(phba,
+					phba->sli4_hba.nvmet_mrq_hdr[qidx],
+					phba->sli4_hba.nvmet_mrq_data[qidx]);
+	}
+
+	/* Unset NVMET CQ Set complete queue */
+	if (phba->sli4_hba.nvmet_cqset) {
+		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+			lpfc_cq_destroy(phba,
+					phba->sli4_hba.nvmet_cqset[qidx]);
+	}
+
+	/* Unset FCP response complete queue */
+	if (phba->sli4_hba.fcp_cq)
+		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
+			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
+
+	/* Unset fast-path event queue */
+	if (phba->sli4_hba.hba_eq)
+		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
+			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of the completion queue event is a completion queue entry
+ * (CQE). For now, this pool is used for the interrupt service routine to queue
+ * the following HBA completion queue events for the worker thread to process:
+ *   - Mailbox asynchronous events
+ *   - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	int i;
+
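+	/*
+	 * Pre-allocate four events per CQ entry; presumably enough headroom
+	 * for the ISR to queue slow-path events faster than the worker
+	 * thread can drain them.
+	 */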
+	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+		if (!cq_event)
+			goto out_pool_create_fail;
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_cqe_event_pool);
+	}
+	return 0;
+
+out_pool_create_fail:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that, it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event, *next_cq_event;
+
+	list_for_each_entry_safe(cq_event, next_cq_event,
+				 &phba->sli4_hba.sp_cqe_event_pool, list) {
+		list_del(&cq_event->list);
+		kfree(cq_event);
+	}
+}
+
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event = NULL;
+
+	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+			 struct lpfc_cq_event, list);
+	return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cq_event = __lpfc_sli4_cq_event_alloc(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			     struct lpfc_cq_event *cq_event)
+{
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			   struct lpfc_cq_event *cq_event)
+{
+	unsigned long iflags;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_sli4_cq_event_release(phba, cq_event);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine frees all the pending completion-queue events back into
+ * the free pool for device reset.
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+	LIST_HEAD(cqelist);
+	struct lpfc_cq_event *cqe;
+	unsigned long iflags;
+
+	/* Retrieve all the pending WCQEs from pending WCQE lists */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Pending FCP XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending ELS XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+			 &cqelist);
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		/* Pending NVME XRI abort events */
+		list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+				 &cqelist);
+	}
+	/* Pending async events */
+	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+			 &cqelist);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	while (!list_empty(&cqelist)) {
+		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+		lpfc_sli4_cq_event_release(phba, cqe);
+	}
+}
+
+/**
+ * lpfc_pci_function_reset - Reset pci function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys
+ * all resources assigned to the PCI function that originates this request.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t rc = 0, if_type;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t rdy_chk;
+	uint32_t port_reset = 0;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_register reg_data;
+	uint16_t devid;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+		if (!mboxq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0494 Unable to allocate memory for "
+					"issuing SLI_FUNCTION_RESET mailbox "
+					"command\n");
+			return -ENOMEM;
+		}
+
+		/* Setup PCI function reset mailbox-ioctl command */
+		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+				 LPFC_SLI4_MBX_EMBED);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mboxq, phba->mbox_mem_pool);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0495 SLI_FUNCTION_RESET mailbox "
+					"failed with status x%x add_status x%x,"
+					" mbx status x%x\n",
+					shdr_status, shdr_add_status, rc);
+			rc = -ENXIO;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+wait:
+		/*
+		 * Poll the Port Status Register and wait for RDY for
+		 * up to 30 seconds. If the port doesn't respond, treat
+		 * it as an error.
+		 */
+		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.
+				STATUSregaddr, &reg_data.word0)) {
+				rc = -ENODEV;
+				goto out;
+			}
+			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+				break;
+			msleep(20);
+		}
+
+		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
+			phba->work_status[0] = readl(
+				phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] = readl(
+				phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2890 Port not ready, port status reg "
+					"0x%x error 1=0x%x, error 2=0x%x\n",
+					reg_data.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+			rc = -ENODEV;
+			goto out;
+		}
+
+		if (!port_reset) {
+			/*
+			 * Reset the port now
+			 */
+			reg_data.word0 = 0;
+			bf_set(lpfc_sliport_ctrl_end, &reg_data,
+			       LPFC_SLIPORT_LITTLE_ENDIAN);
+			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
+			       LPFC_SLIPORT_INIT_PORT);
+			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
+			       CTRLregaddr);
+			/* flush */
+			pci_read_config_word(phba->pcidev,
+					     PCI_DEVICE_ID, &devid);
+
+			port_reset = 1;
+			msleep(20);
+			goto wait;
+		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+			rc = -ENODEV;
+			goto out;
+		}
+		break;
+
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
+
+out:
+	/* Catch the not-ready port failure after a port reset. */
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3317 HBA not functional: IP Reset Failed "
+				"try: echo fw_reset > board_mode\n");
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar1map_len, bar2map_len;
+	int error = -ENODEV;
+	uint32_t if_type;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	else
+		pdev = phba->pcidev;
+
+	/* Set the device DMA mask: prefer 64-bit, fall back to 32-bit */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+			return error;
+		}
+	}
+
+	/*
+	 * The BARs and register set definitions and offset locations are
+	 * dependent on the if_type.
+	 */
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
+				  &phba->sli4_hba.sli_intf.word0)) {
+		return error;
+	}
+
+	/* There is no SLI3 fallback for SLI4 devices. */
+	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_VALID) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2894 SLI_INTF reg contents invalid "
+				"sli_intf reg 0x%x\n",
+				phba->sli4_hba.sli_intf.word0);
+		return error;
+	}
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	/*
+	 * Get the bus address of the SLI4 device BAR regions and the
+	 * number of bytes required by each mapping. Which PCI BAR
+	 * regions get mapped depends on the type of SLI4 device.
+	 */
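+	/*
+	 * Summary of the mappings set up below:
+	 *   if_type 0: BAR0 -> config registers, BAR2 -> control registers,
+	 *              BAR4 -> doorbell registers.
+	 *   if_type 2: only BAR0 is mapped; a missing BAR0 is fatal.
+	 */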
+	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
+
+		/*
+		 * Map SLI4 PCI Config Space Register base to a kernel virtual
+		 * addr
+		 */
+		phba->sli4_hba.conf_regs_memmap_p =
+			ioremap(phba->pci_bar0_map, bar0map_len);
+		if (!phba->sli4_hba.conf_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "ioremap failed for SLI4 PCI config "
+				   "registers.\n");
+			goto out;
+		}
+		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
+		/* Set up BAR0 PCI config space register memory map */
+		lpfc_sli4_bar0_register_memmap(phba, if_type);
+	} else {
+		phba->pci_bar0_map = pci_resource_start(pdev, 1);
+		bar0map_len = pci_resource_len(pdev, 1);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			dev_printk(KERN_ERR, &pdev->dev,
+			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
+			goto out;
+		}
+		phba->sli4_hba.conf_regs_memmap_p =
+				ioremap(phba->pci_bar0_map, bar0map_len);
+		if (!phba->sli4_hba.conf_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "ioremap failed for SLI4 PCI config "
+				   "registers.\n");
+			goto out;
+		}
+		lpfc_sli4_bar0_register_memmap(phba, if_type);
+	}
+
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+			/*
+			 * Map SLI4 if type 0 HBA Control Register base to a
+			 * kernel virtual address and setup the registers.
+			 */
+			phba->pci_bar1_map = pci_resource_start(pdev,
+								PCI_64BIT_BAR2);
+			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+			phba->sli4_hba.ctrl_regs_memmap_p =
+					ioremap(phba->pci_bar1_map,
+						bar1map_len);
+			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+				dev_err(&pdev->dev,
+					"ioremap failed for SLI4 HBA "
+					"control registers.\n");
+				error = -ENOMEM;
+				goto out_iounmap_conf;
+			}
+			phba->pci_bar2_memmap_p =
+					 phba->sli4_hba.ctrl_regs_memmap_p;
+			lpfc_sli4_bar1_register_memmap(phba);
+		} else {
+			error = -ENOMEM;
+			goto out_iounmap_conf;
+		}
+	}
+
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+			/*
+			 * Map SLI4 if type 0 HBA Doorbell Register base to
+			 * a kernel virtual address and setup the registers.
+			 */
+			phba->pci_bar2_map = pci_resource_start(pdev,
+								PCI_64BIT_BAR4);
+			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+			phba->sli4_hba.drbl_regs_memmap_p =
+					ioremap(phba->pci_bar2_map,
+						bar2map_len);
+			if (!phba->sli4_hba.drbl_regs_memmap_p) {
+				dev_err(&pdev->dev,
+					"ioremap failed for SLI4 HBA "
+					"doorbell registers.\n");
+				error = -ENOMEM;
+				goto out_iounmap_ctrl;
+			}
+			phba->pci_bar4_memmap_p =
+					phba->sli4_hba.drbl_regs_memmap_p;
+			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+			if (error)
+				goto out_iounmap_all;
+		} else {
+			error = -ENOMEM;
+			goto out_iounmap_all;
+		}
+	}
+
+	return 0;
+
+out_iounmap_all:
+	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+	return error;
+}
+
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+	uint32_t if_type;
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static int
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
+{
+	int rc;
+	LPFC_MBOXQ_t *pmb;
+
+	/* Set up MSI-X multi-message vectors */
+	rc = pci_alloc_irq_vectors(phba->pcidev,
+			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
+	if (rc < 0) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0420 PCI enable MSI-X failed (%d)\n", rc);
+		goto vec_fail_out;
+	}
+
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* vector-0 is associated to slow-path handler */
+	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
+			 &lpfc_sli_sp_intr_handler, 0,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0421 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* vector-1 is associated to fast-path handler */
+	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
+			 &lpfc_sli_fp_intr_handler, 0,
+			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0429 MSI-X fast-path request_irq failed "
+				"(%d)\n", rc);
+		goto irq_fail_out;
+	}
+
+	/*
+	 * Configure HBA MSI-X attention conditions to messages
+	 */
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmb) {
+		rc = -ENOMEM;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0474 Unable to allocate memory for issuing "
+				"MBOX_CONFIG_MSI command\n");
+		goto mem_fail_out;
+	}
+	rc = lpfc_config_msi(phba, pmb);
+	if (rc)
+		goto mbx_fail_out;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"0351 Config MSI mailbox command failed, "
+				"mbxCmd x%x, mbxStatus x%x\n",
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
+		goto mbx_fail_out;
+	}
+
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+
+mbx_fail_out:
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+	/* free the irq already requested */
+	free_irq(pci_irq_vector(phba->pcidev, 1), phba);
+
+irq_fail_out:
+	/* free the irq already requested */
+	free_irq(pci_irq_vector(phba->pcidev, 0), phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_free_irq_vectors(phba->pcidev);
+
+vec_fail_out:
+	return rc;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ */
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0462 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0471 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+			 0, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0478 MSI request_irq failed (%d)\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate the
+ * driver's interrupt handler(s) to interrupt vector(s) of a device with
+ * SLI-3 interface spec. Depending on the interrupt mode configured for
+ * the driver, the driver will try to fall back from the configured
+ * interrupt mode to an interrupt mode supported by the platform, kernel,
+ * and device, in the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *   interrupt mode (0 = INTx, 1 = MSI, 2 = MSI-X) - successfully enabled
+ *   LPFC_INTR_ERROR - failed to enable any interrupt mode
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fall back to INTx if both MSI-X and MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+	int nr_irqs, i;
+
+	if (phba->intr_type == MSIX)
+		nr_irqs = LPFC_MSIX_VECTORS;
+	else
+		nr_irqs = 1;
+
+	for (i = 0; i < nr_irqs; i++)
+		free_irq(pci_irq_vector(phba->pcidev, i), phba);
+	pci_free_irq_vectors(phba->pcidev);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+}
+
+/**
+ * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
+ * @phba: pointer to lpfc hba data structure.
+ * @vectors: number of msix vectors allocated.
+ *
+ * The routine will figure out the CPU affinity assignment for every
+ * MSI-X vector allocated for the HBA.  The hba_eq_hdl will be updated
+ * with a pointer to the CPU mask that defines ALL the CPUs this vector
+ * can be associated with. If the vector can be uniquely associated with
+ * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
+ * In addition, the CPU to IO channel mapping will be calculated
+ * and the phba->sli4_hba.cpu_map array will reflect this.
+ */
+static void
+lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
+{
+	struct lpfc_vector_map_info *cpup;
+	int index = 0;
+	int vec = 0;
+	int cpu;
+#ifdef CONFIG_X86
+	struct cpuinfo_x86 *cpuinfo;
+#endif
+
+	/* Init cpu_map array */
+	memset(phba->sli4_hba.cpu_map, 0xff,
+	       (sizeof(struct lpfc_vector_map_info) *
+	       phba->sli4_hba.num_present_cpu));
+
+	/* Update CPU map with physical id and core id of each CPU */
+	cpup = phba->sli4_hba.cpu_map;
+	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+		cpuinfo = &cpu_data(cpu);
+		cpup->phys_id = cpuinfo->phys_proc_id;
+		cpup->core_id = cpuinfo->cpu_core_id;
+#else
+		/* No distinction between CPUs for other platforms */
+		cpup->phys_id = 0;
+		cpup->core_id = 0;
+#endif
+		cpup->channel_id = index;  /* For now round robin */
+		cpup->irq = pci_irq_vector(phba->pcidev, vec);
+		vec++;
+		if (vec >= vectors)
+			vec = 0;
+		index++;
+		if (index >= phba->cfg_fcp_io_channel)
+			index = 0;
+		cpup++;
+	}
+}
+
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+	int vectors, rc, index;
+	char *name;
+
+	/* Set up MSI-X multi-message vectors */
+	vectors = phba->io_channel_irqs;
+	if (phba->cfg_fof)
+		vectors++;
+
+	rc = pci_alloc_irq_vectors(phba->pcidev,
+				(phba->nvmet_support) ? 1 : 2,
+				vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+	if (rc < 0) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0484 PCI enable MSI-X failed (%d)\n", rc);
+		goto vec_fail_out;
+	}
+	vectors = rc;
+
+	/* Assign MSI-X vectors to interrupt handlers */
+	for (index = 0; index < vectors; index++) {
+		name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
+		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
+			 LPFC_DRIVER_HANDLER_NAME"%d", index);
+
+		phba->sli4_hba.hba_eq_hdl[index].idx = index;
+		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+		atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
+		if (phba->cfg_fof && (index == (vectors - 1)))
+			rc = request_irq(pci_irq_vector(phba->pcidev, index),
+				 &lpfc_sli4_fof_intr_handler, 0,
+				 name,
+				 &phba->sli4_hba.hba_eq_hdl[index]);
+		else
+			rc = request_irq(pci_irq_vector(phba->pcidev, index),
+				 &lpfc_sli4_hba_intr_handler, 0,
+				 name,
+				 &phba->sli4_hba.hba_eq_hdl[index]);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0486 MSI-X fast-path (%d) "
+					"request_irq failed (%d)\n", index, rc);
+			goto cfg_fail_out;
+		}
+	}
+
+	if (phba->cfg_fof)
+		vectors--;
+
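+	/*
+	 * If fewer MSI-X vectors were granted than requested (e.g. 8
+	 * requested but only 4 granted), cap the FCP and NVME IO channel
+	 * counts at the granted count and recompute io_channel_irqs.
+	 */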
+	if (vectors != phba->io_channel_irqs) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3238 Reducing IO channels to match number of "
+				"MSI-X vectors, requested %d got %d\n",
+				phba->io_channel_irqs, vectors);
+		if (phba->cfg_fcp_io_channel > vectors)
+			phba->cfg_fcp_io_channel = vectors;
+		if (phba->cfg_nvme_io_channel > vectors)
+			phba->cfg_nvme_io_channel = vectors;
+		if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
+			phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+		else
+			phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+	}
+	lpfc_cpu_affinity_check(phba, vectors);
+
+	return rc;
+
+cfg_fail_out:
+	/* free the irq already requested */
+	for (--index; index >= 0; index--)
+		free_irq(pci_irq_vector(phba->pcidev, index),
+				&phba->sli4_hba.hba_eq_hdl[index]);
+
+	/* Unconfigure MSI-X capability structure */
+	pci_free_irq_vectors(phba->pcidev);
+
+vec_fail_out:
+	return rc;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0487 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0488 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+			 0, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0490 MSI request_irq failed (%d)\n", rc);
+		return rc;
+	}
+
+	for (index = 0; index < phba->io_channel_irqs; index++) {
+		phba->sli4_hba.hba_eq_hdl[index].idx = index;
+		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+	}
+
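+	/*
+	 * The loop above leaves index == io_channel_irqs, so the fof (OAS)
+	 * event queue handle, when enabled, occupies the next slot.
+	 */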
+	if (phba->cfg_fof) {
+		phba->sli4_hba.hba_eq_hdl[index].idx = index;
+		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate the
+ * driver's interrupt handler(s) to interrupt vector(s) of a device with
+ * SLI-4 interface spec. Depending on the interrupt mode configured for
+ * the driver, the driver will try to fall back from the configured
+ * interrupt mode to an interrupt mode supported by the platform, kernel,
+ * and device, in the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 	interrupt mode (0 = INTx, 1 = MSI, 2 = MSI-X) - successfully enabled
+ * 	LPFC_INTR_ERROR - failed to enable any interrupt mode
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval, idx;
+
+	if (cfg_mode == 2) {
+		/* Preparation before conf_msi mbox cmd */
+		retval = 0;
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli4_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli4_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fall back to INTx if both MSI-X and MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			struct lpfc_hba_eq_hdl *eqhdl;
+
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+
+			for (idx = 0; idx < phba->io_channel_irqs; idx++) {
+				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+				eqhdl->idx = idx;
+				eqhdl->phba = phba;
+				atomic_set(&eqhdl->hba_eq_in_use, 1);
+			}
+			if (phba->cfg_fof) {
+				eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+				eqhdl->idx = idx;
+				eqhdl->phba = phba;
+				atomic_set(&eqhdl->hba_eq_in_use, 1);
+			}
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX) {
+		int index;
+
+		/* Free up MSI-X multi-message vectors */
+		for (index = 0; index < phba->io_channel_irqs; index++)
+			free_irq(pci_irq_vector(phba->pcidev, index),
+					&phba->sli4_hba.hba_eq_hdl[index]);
+
+		if (phba->cfg_fof)
+			free_irq(pci_irq_vector(phba->pcidev, index),
+					&phba->sli4_hba.hba_eq_hdl[index]);
+	} else {
+		free_irq(phba->pcidev->irq, phba);
+	}
+
+	pci_free_irq_vectors(phba->pcidev);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the HBA device initialization steps to
+ * a device with SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	kfree(phba->vpi_bmask);
+	kfree(phba->vpi_ids);
+
+	lpfc_stop_hba_timers(phba);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli_hba_down(phba);
+
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_sli_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion
+ * of the device's outstanding XRI exchange-busy conditions. It checks the
+ * XRI exchange busy on outstanding FCP and ELS I/Os every 10ms for up to
+ * 10 seconds; after that, it checks the XRI exchange busy on outstanding
+ * FCP and ELS I/Os every 30 seconds, logs an error message, and waits
+ * forever. Only when all XRI exchange-busy conditions complete shall the
+ * driver unload proceed with invoking the function reset ioctl mailbox
+ * command to the CNA and the rest of the driver unload resource release.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+	int wait_time = 0;
+	int nvme_xri_cmpl = 1;
+	int nvmet_xri_cmpl = 1;
+	int fcp_xri_cmpl = 1;
+	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
+		fcp_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		nvme_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+		nvmet_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+	}
+
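+	/*
+	 * Poll until all lists drain. LPFC_XRI_EXCH_BUSY_WAIT_T1/_T2 are
+	 * assumed here to be the 10ms and 30s intervals described above;
+	 * once the accumulated wait exceeds LPFC_XRI_EXCH_BUSY_WAIT_TMO,
+	 * the slower interval is used and errors are logged on each pass.
+	 */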
+	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
+	       !nvmet_xri_cmpl) {
+		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+			if (!nvme_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6100 NVME XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			if (!fcp_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"2877 FCP XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			if (!els_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"2878 ELS XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+		} else {
+			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+		}
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			nvme_xri_cmpl = list_empty(
+				&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+			nvmet_xri_cmpl = list_empty(
+				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+		}
+
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
+			fcp_xri_cmpl = list_empty(
+				&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+
+		els_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+	}
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+	int wait_cnt = 0;
+	LPFC_MBOXQ_t *mboxq;
+	struct pci_dev *pdev = phba->pcidev;
+
+	lpfc_stop_hba_timers(phba);
+	phba->sli4_hba.intr_enable = 0;
+
+	/*
+	 * Gracefully wait out any currently outstanding asynchronous
+	 * mailbox command.
+	 */
+
+	/* First, block any pending async mailbox command from being posted */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, try to wait it out if we can */
+	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		msleep(10);
+		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+			break;
+	}
+	/* Forcefully release the outstanding mailbox command if timed out */
+	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_lock_irq(&phba->hbalock);
+		mboxq = phba->sli.mbox_active;
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Abort all iocbs associated with the hba */
+	lpfc_sli_hba_iocb_abort(phba);
+
+	/* Wait for completion of device XRI exchange busy */
+	lpfc_sli4_xri_exchange_busy_wait(phba);
+
+	/* Disable PCI subsystem interrupt */
+	lpfc_sli4_disable_intr(phba);
+
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
+	/* Stop kthread signal shall trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+
+	/* Unset the queues shared with the hardware, then release all
+	 * allocated resources.
+	 */
+	lpfc_sli4_queue_unset(phba);
+	lpfc_sli4_queue_destroy(phba);
+
+	/* Reset SLI4 HBA FCoE function */
+	lpfc_pci_function_reset(phba);
+
+	/* Stop the SLI4 device port */
+	phba->pport->work_port_events = 0;
+}
+
+/**
+ * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	uint32_t mbox_tmo;
+
+	rc = 0;
+	mqe = &mboxq->u.mqe;
+
+	/* Read the port's SLI4 Parameters port capabilities */
+	lpfc_pc_sli4_params(mboxq);
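+	/*
+	 * Before interrupts are enabled the mailbox must be polled;
+	 * afterwards the interrupt-driven wait path can be used.
+	 */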
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	}
+
+	if (unlikely(rc))
+		return 1;
+
+	sli4_params = &phba->sli4_hba.pc_sli4_params;
+	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+					     &mqe->un.sli4_params);
+	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+					     &mqe->un.sli4_params);
+	sli4_params->proto_types = mqe->un.sli4_params.word3;
+	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
+	return rc;
+}
+
+/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe = &mboxq->u.mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	uint32_t mbox_tmo;
+	int length;
+	struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+	/*
+	 * By default, the driver assumes the SLI4 port requires RPI
+	 * header postings.  The SLI4_PARAM response will correct this
+	 * assumption.
+	 */
+	phba->sli4_hba.rpi_hdrs_in_use = 1;
+
+	/* Read the port's SLI4 Config Parameters */
+	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+			 length, LPFC_SLI4_MBX_EMBED);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	}
+	if (unlikely(rc))
+		return rc;
+	sli4_params = &phba->sli4_hba.pc_sli4_params;
+	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+					     mbx_sli4_parameters);
+	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+					     mbx_sli4_parameters);
+	if (bf_get(cfg_phwq, mbx_sli4_parameters))
+		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+	else
+		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
+	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
+	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+					    mbx_sli4_parameters);
+	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
+	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+					   mbx_sli4_parameters);
+	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+	phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
+			      bf_get(cfg_xib, mbx_sli4_parameters));
+
+	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
+	    !phba->nvme_support) {
+		phba->nvme_support = 0;
+		phba->nvmet_support = 0;
+		phba->cfg_nvmet_mrq = 0;
+		phba->cfg_nvme_io_channel = 0;
+		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
+				"6101 Disabling NVME support: "
+				"Not supported by firmware: %d %d\n",
+				bf_get(cfg_nvme, mbx_sli4_parameters),
+				bf_get(cfg_xib, mbx_sli4_parameters));
+
+		/* If firmware doesn't support NVME, just use SCSI support */
+		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+			return -ENODEV;
+		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+	}
+
+	if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
+		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
+
+	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
+		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
+	/*
+	 * Issue IOs with CDB embedded in WQE to minimize the number
+	 * of DMAs the firmware has to do. Setting this to 1 also forces
+	 * the driver to use 128-byte WQEs for FCP IOs.
+	 */
+	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
+		phba->fcp_embed_io = 1;
+	else
+		phba->fcp_embed_io = 0;
+
+	/*
+	 * Check if the SLI port supports MDS Diagnostics
+	 */
+	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
+		phba->mds_diags_support = 1;
+	else
+		phba->mds_diags_support = 0;
+	return 0;
+}
+
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on the PCI bus, the kernel PCI subsystem looks at PCI
+ * device-specific information and checks whether the driver states that it
+ * can support this kind of device. If the match is successful, the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host  *shost = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error)
+		goto out_free_phba;
+
+	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-3 specific device PCI memory space */
+	error = lpfc_sli_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1402 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up SLI-3 specific device driver resources */
+	error = lpfc_sli_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1404 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Initialize and populate the iocb list per host */
+
+	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1405 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s3;
+	}
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1406 Failed to set up driver resource.\n");
+		goto out_free_iocb_list;
+	}
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1407 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
+
+	/* Configure sysfs attributes */
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1476 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
+	}
+
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+	/* Now, try to enable interrupts and bring up the device */
+	cfg_mode = phba->cfg_use_msi;
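+	/*
+	 * Each pass of this loop configures one interrupt mode and then
+	 * verifies that interrupts were actually seen; on failure the mode
+	 * is stepped down (MSI-X -> MSI -> INTx) via cfg_mode = --intr_mode.
+	 */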
+	while (true) {
+		/* Put device to a known state before enabling interrupt */
+		lpfc_stop_port(phba);
+		/* Configure and enable interrupt */
+		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+		if (intr_mode == LPFC_INTR_ERROR) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0431 Failed to enable interrupt.\n");
+			error = -ENODEV;
+			goto out_free_sysfs_attr;
+		}
+		/* SLI-3 HBA setup */
+		if (lpfc_sli_hba_setup(phba)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1477 Failed to set up hba\n");
+			error = -ENODEV;
+			goto out_remove_device;
+		}
+
+		/* Wait 50ms for the interrupts of previous mailbox commands */
+		msleep(50);
+		/* Check active interrupts on message signaled interrupts */
+		if (intr_mode == 0 ||
+		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+			/* Log the current active interrupt mode */
+			phba->intr_mode = intr_mode;
+			lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0447 Configure interrupt mode (%d) "
+					"failed active interrupt test.\n",
+					intr_mode);
+			/* Disable the current interrupt mode */
+			lpfc_sli_disable_intr(phba);
+			/* Try next level of interrupt mode */
+			cfg_mode = --intr_mode;
+		}
+	}
+
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+
+	return 0;
+
+out_remove_device:
+	lpfc_unset_hba(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+	lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+	lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba   *phba = vport->phba;
+	int i;
+
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
+			fc_vport_terminate(vports[i]->fc_vport);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+
+	lpfc_cleanup(vport);
+
+	/*
+	 * Bring down the SLI Layer. This step disables all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA.
+	 */
+
+	/* HBA interrupt will be disabled after this call */
+	lpfc_sli_hba_down(phba);
+	/* Stop kthread signal shall trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+	/* Final cleanup of txcmplq and reset the HBA */
+	lpfc_sli_brdrestart(phba);
+
+	kfree(phba->vpi_bmask);
+	kfree(phba->vpi_ids);
+
+	lpfc_stop_hba_timers(phba);
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_debugfs_terminate(vport);
+
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
+	/* Disable interrupt */
+	lpfc_sli_disable_intr(phba);
+
+	scsi_host_put(shost);
+
+	/*
+	 * Call scsi_free before mem_free since scsi bufs are released to their
+	 * corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_mem_free_all(phba);
+
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+
+	/* Free resources associated with SLI2 interface */
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* unmap adapter SLIM and Control Registers */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	lpfc_hba_free(phba);
+
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off the device's interrupt and DMA,
+ * and bringing the device offline. Note that because the driver implements
+ * only the minimum PM requirements for a power-aware driver's suspend/resume
+ * support -- all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
+ * the suspend() method are treated as SUSPEND and the driver fully
+ * reinitializes its device during the resume() method call -- the driver
+ * sets the device to PCI_D3hot state in PCI config space instead of setting
+ * it according to the @msg provided by the PM.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0473 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli_disable_intr(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that because
+ * the driver implements only the minimum PM requirements for a power-aware
+ * driver's suspend/resume support -- all possible PM messages (SUSPEND,
+ * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND
+ * and the driver fully reinitializes its device during the resume() method
+ * call -- the device is set to PCI_D0 directly in PCI config space before
+ * restoring the state.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0452 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/*
+	 * Since pci_restore_state() clears the device's saved_state flag,
+	 * the restored state must be saved again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					"lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0434 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0430 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2723 PCI channel I/O abort preparing for recovery\n");
+
+	/*
+	 * There may be errored I/Os through the HBA; abort all I/Os on the
+	 * txcmplq and let the SCSI mid-layer retry them to recover.
+	 */
+	lpfc_sli_abort_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2710 PCI channel disable preparing for reset\n");
+
+	/* Block any management I/Os to the device */
+	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Disable interrupt and pci device */
+	lpfc_sli_disable_intr(phba);
+	pci_disable_device(phba->pcidev);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2711 PCI channel permanent disable for failure\n");
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_sli_prep_dev_for_perm_failure(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 Unknown PCI error state: x%x\n", state);
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+}
+
+/**
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to
+ * device with SLI-3 interface spec. This is called after PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function will initialize the HBA device,
+ * enable the interrupt, but it will just put the HBA to offline state
+ * without passing any I/O traffic.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t intr_mode;
+
+	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+	if (pci_enable_device_mem(pdev)) {
+		printk(KERN_ERR "lpfc: Cannot re-enable "
+			"PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+
+	/*
+	 * Since pci_restore_state() clears the device's saved_state flag,
+	 * the restored state must be saved again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0427 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Take device offline, it will perform cleanup */
+	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	lpfc_offline(phba);
+	lpfc_sli_brdrestart(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ */
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	/* Bring device online; it will be a no-op for non-fatal error resume */
+	lpfc_online(phba);
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+/**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT IOCBs to reserve
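+ * The reservation scales with max_xri in tiers; for example, a port
+ * reporting max_xri = 800 falls in the <= 1024 tier and reserves 100.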
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (max_xri <= 100)
+			return 10;
+		else if (max_xri <= 256)
+			return 25;
+		else if (max_xri <= 512)
+			return 50;
+		else if (max_xri <= 1024)
+			return 100;
+		else if (max_xri <= 1536)
+			return 150;
+		else if (max_xri <= 2048)
+			return 200;
+		else
+			return 250;
+	} else
+		return 0;
+}
+
+/**
+ * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT + NVMET IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
+{
+	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
+
+	if (phba->nvmet_support)
+		max_xri += LPFC_NVMET_BUF_POST;
+	return max_xri;
+}
+
+
+/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @fw: pointer to firmware image returned from request_firmware.
+ * @context: opaque pointer to the lpfc hba data structure.
+ *
+ **/
+static void
+lpfc_write_firmware(const struct firmware *fw, void *context)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)context;
+	char fwrev[FW_REV_STR_SIZE];
+	struct lpfc_grp_hdr *image;
+	struct list_head dma_buffer_list;
+	int i, rc = 0;
+	struct lpfc_dmabuf *dmabuf, *next;
+	uint32_t offset = 0, temp_offset = 0;
+	uint32_t magic_number, ftype, fid, fsize;
+
+	/* The firmware pointer can be NULL in no-wait mode; sanity check it */
+	if (!fw) {
+		rc = -ENXIO;
+		goto out;
+	}
+	image = (struct lpfc_grp_hdr *)fw->data;
+
+	magic_number = be32_to_cpu(image->magic_number);
+	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
+	fid = bf_get_be32(lpfc_grp_hdr_id, image);
+	fsize = be32_to_cpu(image->size);
+
+	INIT_LIST_HEAD(&dma_buffer_list);
+	if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
+	     magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
+	    ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3022 Invalid FW image found. "
+				"Magic:%x Type:%x ID:%x Size %d %zd\n",
+				magic_number, ftype, fid, fsize, fw->size);
+		rc = -EINVAL;
+		goto release_out;
+	}
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3023 Updating Firmware, Current Version:%s "
+				"New Version:%s\n",
+				fwrev, image->revision);
+		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+					 GFP_KERNEL);
+			if (!dmabuf) {
+				rc = -ENOMEM;
+				goto release_out;
+			}
+			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+							  SLI4_PAGE_SIZE,
+							  &dmabuf->phys,
+							  GFP_KERNEL);
+			if (!dmabuf->virt) {
+				kfree(dmabuf);
+				rc = -ENOMEM;
+				goto release_out;
+			}
+			list_add_tail(&dmabuf->list, &dma_buffer_list);
+		}
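+		/* Stream the image to the port in page-sized chunks; each
+		 * lpfc_wr_object() call consumes up to
+		 * LPFC_MBX_WR_CONFIG_MAX_BDE pages and advances offset.
+		 */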
+		while (offset < fw->size) {
+			temp_offset = offset;
+			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+					memcpy(dmabuf->virt,
+					       fw->data + temp_offset,
+					       fw->size - temp_offset);
+					temp_offset = fw->size;
+					break;
+				}
+				memcpy(dmabuf->virt, fw->data + temp_offset,
+				       SLI4_PAGE_SIZE);
+				temp_offset += SLI4_PAGE_SIZE;
+			}
+			rc = lpfc_wr_object(phba, &dma_buffer_list,
+				    (fw->size - offset), &offset);
+			if (rc)
+				goto release_out;
+		}
+		rc = offset;
+	}
+
+release_out:
+	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+		list_del(&dmabuf->list);
+		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	release_firmware(fw);
+out:
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3024 Firmware update done: %d.\n", rc);
+	return;
+}
+
+/**
+ * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
+ * @phba: pointer to lpfc hba data structure.
+ * @fw_upgrade: INT_FW_UPGRADE to request an asynchronous (no-wait) upgrade,
+ *              RUN_FW_UPGRADE to run the upgrade synchronously.
+ *
+ * This routine is called to perform a Linux generic firmware upgrade on a
+ * device that supports this feature.
+ **/
+int
+lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+{
+	char file_name[ELX_MODEL_NAME_SIZE];
+	int ret;
+	const struct firmware *fw;
+
+	/* Only supported on SLI4 interface type 2 for now */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		return -EPERM;
+
+	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+
+	if (fw_upgrade == INT_FW_UPGRADE) {
+		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+					file_name, &phba->pcidev->dev,
+					GFP_KERNEL, (void *)phba,
+					lpfc_write_firmware);
+	} else if (fw_upgrade == RUN_FW_UPGRADE) {
+		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
+		if (!ret)
+			lpfc_write_firmware(fw, (void *)phba);
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
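+
+/*
+ * Illustrative usage (an assumption, not a fixed API contract): the SLI-4
+ * probe path later in this file requests an asynchronous upgrade, while a
+ * user-triggered upgrade could be run synchronously:
+ *
+ *	lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
+ *	lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
+ */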
+
+/**
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is called from the kernel's PCI subsystem for a device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * presented on the PCI bus, the kernel PCI subsystem looks at PCI
+ * device-specific information of the device and driver to see if the driver
+ * can support this kind of device. If the match is successful, the driver
+ * core invokes this routine. If this routine determines it can claim the HBA,
+ * it does all the initialization that it needs to do to handle the HBA
+ * properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host  *shost = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error)
+		goto out_free_phba;
+
+	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-4 specific device PCI memory space */
+	error = lpfc_sli4_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1410 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up SLI-4 Specific device driver resources */
+	error = lpfc_sli4_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1412 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
+	}
+
+	INIT_LIST_HEAD(&phba->active_rrq_list);
+	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1414 Failed to set up driver resource.\n");
+		goto out_unset_driver_resource_s4;
+	}
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/* Create SCSI host for the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1415 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
+
+	/* Configure sysfs attributes */
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1416 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
+	}
+
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+	/* Now, trying to enable interrupt and bring up the device */
+	cfg_mode = phba->cfg_use_msi;
+
+	/* Put device to a known state before enabling interrupt */
+	lpfc_stop_port(phba);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0426 Failed to enable interrupt.\n");
+		error = -ENODEV;
+		goto out_free_sysfs_attr;
+	}
+	/* Default to single EQ for non-MSI-X */
+	if (phba->intr_type != MSIX) {
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
+			phba->cfg_fcp_io_channel = 1;
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			phba->cfg_nvme_io_channel = 1;
+			if (phba->nvmet_support)
+				phba->cfg_nvmet_mrq = 1;
+		}
+		phba->io_channel_irqs = 1;
+	}
+
+	/* Set up SLI-4 HBA */
+	if (lpfc_sli4_hba_setup(phba)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1421 Failed to set up hba\n");
+		error = -ENODEV;
+		goto out_disable_intr;
+	}
+
+	/* Log the current active interrupt mode */
+	phba->intr_mode = intr_mode;
+	lpfc_log_intr_mode(phba, intr_mode);
+
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	/* NVME support was verified against the FW earlier in the driver
+	 * load and the FC4 type corrected, making a check for nvme_support
+	 * here unnecessary.
+	 */
+	if ((phba->nvmet_support == 0) &&
+	    (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+		/* Create NVME binding with nvme_fc_transport. This
+		 * ensures the vport is initialized.  If the localport
+		 * create fails, it should not unload the driver to
+		 * support field issues.
+		 */
+		error = lpfc_nvme_create_localport(vport);
+		if (error) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6004 NVME registration failed, "
+					"error x%x\n",
+					error);
+		}
+	}
+
+	/* check for firmware upgrade or downgrade */
+	if (phba->cfg_request_firmware_upgrade)
+		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
+
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+	return 0;
+
+out_disable_intr:
+	lpfc_sli4_disable_intr(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_unset_driver_resource_s4:
+	lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+	lpfc_sli4_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem for a device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba *phba = vport->phba;
+	int i;
+
+	/* Mark the device unloading flag */
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Free the HBA sysfs attributes */
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
+			fc_vport_terminate(vports[i]->fc_vport);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+	/*
+	 * Bring down the SLI Layer. This step disables all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA FCoE function.
+	 */
+	lpfc_debugfs_terminate(vport);
+	lpfc_sli4_hba_unset(phba);
+
+	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
+	 * localports are destroyed afterwards to clean up all transport memory.
+	 */
+	lpfc_cleanup(vport);
+	lpfc_nvmet_destroy_targetport(phba);
+	lpfc_nvme_destroy_localport(vport);
+
+
+	lpfc_stop_hba_timers(phba);
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Perform scsi free before driver resource_unset since scsi
+	 * buffers are released to their corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_nvme_free(phba);
+	lpfc_free_iocb_list(phba);
+
+	lpfc_sli4_driver_resource_unset(phba);
+
+	/* Unmap adapter Control and Doorbell registers */
+	lpfc_sli4_pci_mem_unset(phba);
+
+	/* Release PCI resources and disable device's PCI function */
+	scsi_host_put(shost);
+	lpfc_disable_pci_dev(phba);
+
+	/* Finally, free the driver's device data structure */
+	lpfc_hba_free(phba);
+
+	return;
+}
+
+/**
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with SLI-4 interface spec. When PM
+ * invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off the device's interrupt and DMA,
+ * and bringing the device offline. Note that the driver implements only the
+ * minimum PM requirements for a power-aware driver's suspend/resume support:
+ * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
+ * method are treated as SUSPEND, and the driver fully reinitializes its
+ * device during the resume() method call. Therefore, the driver sets the
+ * device to the PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2843 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli4_disable_intr(phba);
+	lpfc_sli4_queue_destroy(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with SLI-4 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state,
+ * fully reinitializes the device, and brings it online. Note that the
+ * driver implements only the minimum PM requirements for a power-aware
+ * driver's suspend/resume support: all possible PM messages (SUSPEND,
+ * HIBERNATE, FREEZE) to the suspend() method are treated as SUSPEND, and
+ * the driver fully reinitializes its device during the resume() method
+ * call. Therefore, the device is set to PCI_D0 directly in PCI config space
+ * before restoring the state.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0292 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/*
+	 * pci_restore_state() clears the device's saved_state flag, so we
+	 * need to save the restored state again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	 /* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					"lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0293 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0294 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2828 PCI channel I/O abort preparing for recovery\n");
+	/*
+	 * There may be errored I/Os through the HBA; abort all I/Os on the
+	 * txcmplq and let the SCSI mid-layer retry them to recover.
+	 */
+	lpfc_sli_abort_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2826 PCI channel disable preparing for reset\n");
+
+	/* Block any management I/Os to the device */
+	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Disable interrupt and pci device */
+	lpfc_sli4_disable_intr(phba);
+	lpfc_sli4_queue_destroy(phba);
+	pci_disable_device(phba->pcidev);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2827 PCI channel permanent disable for failure\n");
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with SLI-4 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this
+ * function is invoked, it will need to stop all the I/Os and interrupt(s)
+ * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
+ * for the PCI subsystem to perform proper recovery as desired.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli4_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli4_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_sli4_prep_dev_for_perm_failure(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2825 Unknown PCI error state: x%x\n", state);
+		lpfc_sli4_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+}
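+
+/*
+ * Summary of the state handling above: io_normal -> CAN_RECOVER (abort
+ * outstanding I/O and retry), io_frozen -> NEED_RESET (quiesce and disable
+ * the device), io_perm_failure -> DISCONNECT (block and flush I/O); any
+ * unknown state is treated as NEED_RESET.
+ */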
+
+/**
+ * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with SLI-4 interface spec. It is called after the PCI bus has been
+ * reset to
+ * restart the PCI card from scratch, as if from a cold-boot. During the
+ * PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function will initialize the HBA device, enable
+ * the interrupt, but it will just put the HBA to offline state without
+ * passing any I/O traffic.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t intr_mode;
+
+	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+	if (pci_enable_device_mem(pdev)) {
+		printk(KERN_ERR "lpfc: Cannot re-enable "
+			"PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+
+	/*
+	 * pci_restore_state() clears the device's saved_state flag, so we
+	 * need to save the restored state again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2824 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with SLI-4 interface spec. It is called when kernel error recovery
+ * tells the lpfc driver that it is OK to resume normal PCI operation after
+ * PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ **/
+static void
+lpfc_io_resume_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	/*
+	 * In case of slot reset, the function reset is performed through a
+	 * mailbox command, which needs DMA to be enabled, so this operation
+	 * has to be deferred to the io resume phase. Taking the device
+	 * offline will perform the necessary cleanup.
+	 */
+	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
+		/* Perform device reset */
+		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+		lpfc_offline(phba);
+		lpfc_sli_brdrestart(phba);
+		/* Bring the device back online */
+		lpfc_online(phba);
+	}
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+/**
+ * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
+ * at PCI device-specific information of the device and driver to see if the
+ * driver can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. This routine dispatches
+ * the action to the proper SLI-3 or SLI-4 device probing routine, which will
+ * do all the initialization that it needs to do to handle the HBA device
+ * properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	int rc;
+	struct lpfc_sli_intf intf;
+
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
+		return -ENODEV;
+
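+	/* A valid SLI_INTF register reporting SLI-4 revision selects the
+	 * SLI-4 probe path; everything else takes the SLI-3 probe.
+	 */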
+	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
+	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
+		rc = lpfc_pci_probe_one_s4(pdev, pid);
+	else
+		rc = lpfc_pci_probe_one_s3(pdev, pid);
+
+	return rc;
+}
+
+/**
+ * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
+ * This routine dispatches the action to the proper SLI-3 or SLI-4 device
+ * remove routine, which will perform all the necessary cleanup for the
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		lpfc_pci_remove_one_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		lpfc_pci_remove_one_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1424 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
+ * suspend the device.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int rc = -ENODEV;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_pci_suspend_one_s3(pdev, msg);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_pci_suspend_one_s4(pdev, msg);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1425 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device resume routine, which will
+ * resume the device.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int rc = -ENODEV;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_pci_resume_one_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_pci_resume_one_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1426 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_error_detected - lpfc method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device error detected handling
+ * routine, which will perform the proper error detected operation.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_io_error_detected_s3(pdev, state);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_io_error_detected_s4(pdev, state);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1427 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called after PCI bus has been reset to restart the PCI card
+ * from scratch, as if from a cold-boot. When this routine is invoked, it
+ * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
+ * routine, which will perform the proper device reset.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_io_slot_reset_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_io_slot_reset_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1428 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_resume - lpfc method for resuming PCI I/O operation
+ * @pdev: pointer to PCI device
+ *
+ * This routine is registered to the PCI subsystem for error handling. It
+ * is called when kernel error recovery tells the lpfc driver that it is
+ * OK to resume normal PCI operation after PCI bus error recovery. When
+ * this routine is invoked, it dispatches the action to the proper SLI-3
+ * or SLI-4 device io_resume routine, which will resume the device operation.
+ **/
+static void
+lpfc_io_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		lpfc_io_resume_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		lpfc_io_resume_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1429 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if OAS is supported for this adapter. If
+ * supported, the configure Flash Optimized Fabric flag is set.  Otherwise,
+ * the enable oas flag is cleared and the pool created for OAS device data
+ * is destroyed.
+ *
+ **/
+void
+lpfc_sli4_oas_verify(struct lpfc_hba *phba)
+{
+
+	if (!phba->cfg_EnableXLane)
+		return;
+
+	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
+		phba->cfg_fof = 1;
+	} else {
+		phba->cfg_fof = 0;
+		if (phba->device_data_mem_pool)
+			mempool_destroy(phba->device_data_mem_pool);
+		phba->device_data_mem_pool = NULL;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_fof_queue_setup - Set up all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the fof queues for the FC HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+int
+lpfc_fof_queue_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring *pring;
+	int rc;
+
+	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
+	if (rc)
+		return -ENOMEM;
+
+	if (phba->cfg_fof) {
+
+		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
+				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
+		if (rc)
+			goto out_oas_cq;
+
+		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
+				    phba->sli4_hba.oas_cq, LPFC_FCP);
+		if (rc)
+			goto out_oas_wq;
+
+		/* Bind this CQ/WQ to the OAS FCP ring */
+		pring = phba->sli4_hba.oas_wq->pring;
+		pring->sli.sli4.wqp =
+			(void *)phba->sli4_hba.oas_wq;
+		phba->sli4_hba.oas_cq->pring = pring;
+	}
+
+	return 0;
+
+out_oas_wq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+out_oas_cq:
+	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
+	return rc;
+
+}
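+
+/*
+ * Note on the queue chain set up above: the OAS work queue (oas_wq) posts
+ * completions to the OAS completion queue (oas_cq), which in turn is bound
+ * to the dedicated fof event queue (fof_eq).
+ */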
+
+/**
+ * lpfc_fof_queue_create - Create all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the fof queues for the FC HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as a placeholder.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_fof_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+	uint32_t wqesize;
+
+	/* Create FOF EQ */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+				      phba->sli4_hba.eq_ecount);
+	if (!qdesc)
+		goto out_error;
+
+	phba->sli4_hba.fof_eq = qdesc;
+
+	if (phba->cfg_fof) {
+
+		/* Create OAS CQ */
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+						      phba->sli4_hba.cq_ecount);
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_cq = qdesc;
+
+		/* Create OAS WQ */
+		wqesize = (phba->fcp_embed_io) ?
+				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+		qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+					      phba->sli4_hba.wq_ecount);
+
+		if (!qdesc)
+			goto out_error;
+
+		phba->sli4_hba.oas_wq = qdesc;
+		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+
+	}
+	return 0;
+
+out_error:
+	lpfc_fof_queue_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_fof_queue_destroy - Destroy all the fof queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 fof queues used by the
+ * FC HBA operation.
+ *
+ * Return codes
+ *      0 - successful
+ **/
+int
+lpfc_fof_queue_destroy(struct lpfc_hba *phba)
+{
+	/* Release FOF Event queue */
+	if (phba->sli4_hba.fof_eq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
+		phba->sli4_hba.fof_eq = NULL;
+	}
+
+	/* Release OAS Completion queue */
+	if (phba->sli4_hba.oas_cq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
+		phba->sli4_hba.oas_cq = NULL;
+	}
+
+	/* Release OAS Work queue */
+	if (phba->sli4_hba.oas_wq != NULL) {
+		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
+		phba->sli4_hba.oas_wq = NULL;
+	}
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(pci, lpfc_id_table);
+
+static const struct pci_error_handlers lpfc_err_handler = {
+	.error_detected = lpfc_io_error_detected,
+	.slot_reset = lpfc_io_slot_reset,
+	.resume = lpfc_io_resume,
+};
+
+static struct pci_driver lpfc_driver = {
+	.name		= LPFC_DRIVER_NAME,
+	.id_table	= lpfc_id_table,
+	.probe		= lpfc_pci_probe_one,
+	.remove		= lpfc_pci_remove_one,
+	.shutdown	= lpfc_pci_remove_one,
+	.suspend        = lpfc_pci_suspend_one,
+	.resume		= lpfc_pci_resume_one,
+	.err_handler    = &lpfc_err_handler,
+};
+
+static const struct file_operations lpfc_mgmt_fop = {
+	.owner = THIS_MODULE,
+};
+
+static struct miscdevice lpfc_mgmt_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "lpfcmgmt",
+	.fops = &lpfc_mgmt_fop,
+};
+
+/**
+ * lpfc_init - lpfc module initialization routine
+ *
+ * This routine is to be invoked when the lpfc module is loaded into the
+ * kernel. The special kernel macro module_init() is used to indicate the
+ * role of this routine to the kernel as lpfc module entry point.
+ *
+ * Return codes
+ *   0 - successful
+ *   -ENOMEM - FC attach transport failed
+ *   all others - failed
+ */
+static int __init
+lpfc_init(void)
+{
+	int error = 0;
+
+	printk(LPFC_MODULE_DESC "\n");
+	printk(LPFC_COPYRIGHT "\n");
+
+	error = misc_register(&lpfc_mgmt_dev);
+	if (error)
+		printk(KERN_ERR "Could not register lpfcmgmt device, "
+			"misc_register returned with status %d\n", error);
+
+	lpfc_transport_functions.vport_create = lpfc_vport_create;
+	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+	lpfc_transport_template =
+				fc_attach_transport(&lpfc_transport_functions);
+	if (lpfc_transport_template == NULL)
+		return -ENOMEM;
+	lpfc_vport_transport_template =
+		fc_attach_transport(&lpfc_vport_transport_functions);
+	if (lpfc_vport_transport_template == NULL) {
+		fc_release_transport(lpfc_transport_template);
+		return -ENOMEM;
+	}
+
+	/* Initialize in case vector mapping is needed */
+	lpfc_used_cpu = NULL;
+	lpfc_present_cpu = num_present_cpus();
+
+	error = pci_register_driver(&lpfc_driver);
+	if (error) {
+		fc_release_transport(lpfc_transport_template);
+		fc_release_transport(lpfc_vport_transport_template);
+	}
+
+	return error;
+}
+
+/**
+ * lpfc_exit - lpfc module removal routine
+ *
+ * This routine is invoked when the lpfc module is removed from the kernel.
+ * The special kernel macro module_exit() is used to indicate the role of
+ * this routine to the kernel as lpfc module exit point.
+ */
+static void __exit
+lpfc_exit(void)
+{
+	misc_deregister(&lpfc_mgmt_dev);
+	pci_unregister_driver(&lpfc_driver);
+	fc_release_transport(lpfc_transport_template);
+	fc_release_transport(lpfc_vport_transport_template);
+	if (_dump_buf_data) {
+		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
+				"_dump_buf_data at 0x%p\n",
+				(1L << _dump_buf_data_order), _dump_buf_data);
+		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
+	}
+
+	if (_dump_buf_dif) {
+		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
+				"_dump_buf_dif at 0x%p\n",
+				(1L << _dump_buf_dif_order), _dump_buf_dif);
+		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
+	}
+	kfree(lpfc_used_cpu);
+	idr_destroy(&lpfc_hba_index);
+}
+
+module_init(lpfc_init);
+module_exit(lpfc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(LPFC_MODULE_DESC);
+MODULE_AUTHOR("Broadcom");
+MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_logmsg.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_logmsg.h
new file mode 100644
index 0000000..3b654ad
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -0,0 +1,65 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LOG_ELS		0x00000001	/* ELS events */
+#define LOG_DISCOVERY	0x00000002	/* Link discovery events */
+#define LOG_MBOX	0x00000004	/* Mailbox events */
+#define LOG_INIT	0x00000008	/* Initialization events */
+#define LOG_LINK_EVENT	0x00000010	/* Link events */
+#define LOG_IP		0x00000020	/* IP traffic history */
+#define LOG_FCP		0x00000040	/* FCP traffic history */
+#define LOG_NODE	0x00000080	/* Node table events */
+#define LOG_TEMP	0x00000100	/* Temperature sensor events */
+#define LOG_BG		0x00000200	/* BlockGuard events */
+#define LOG_MISC	0x00000400	/* Miscellaneous events */
+#define LOG_SLI		0x00000800	/* SLI events */
+#define LOG_FCP_ERROR	0x00001000	/* log errors, not underruns */
+#define LOG_LIBDFC	0x00002000	/* Libdfc events */
+#define LOG_VPORT	0x00004000	/* NPIV events */
+#define LOG_SECURITY	0x00008000	/* Security events */
+#define LOG_EVENT	0x00010000	/* CT, TEMP, DUMP logging */
+#define LOG_FIP		0x00020000	/* FIP events */
+#define LOG_FCP_UNDER	0x00040000	/* FCP underruns errors */
+#define LOG_SCSI_CMD	0x00080000	/* ALL SCSI commands */
+#define LOG_NVME	0x00100000	/* NVME general events. */
+#define LOG_NVME_DISC   0x00200000      /* NVME Discovery/Connect events. */
+#define LOG_NVME_ABTS   0x00400000      /* NVME ABTS events. */
+#define LOG_NVME_IOERR  0x00800000      /* NVME IO Error events. */
+#define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
+
+#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
+do { \
+	{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
+		dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+			   fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
+} while (0)
+
+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+do { \
+	{ uint32_t log_verbose = (phba)->pport ? \
+				 (phba)->pport->cfg_log_verbose : \
+				 (phba)->cfg_log_verbose; \
+	  if (((mask) & log_verbose) || (level[1] <= '3')) \
+		dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+			   fmt, phba->brd_no, ##arg); \
+	} \
+} while (0)
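+
+/*
+ * Usage sketch (illustrative): a message is emitted when one of its mask
+ * bits is set in the (v)port's cfg_log_verbose, or unconditionally at
+ * severity KERN_ERR and above (level[1] <= '3'):
+ *
+ *	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_SLI,
+ *			"1234 example message\n");
+ */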
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_mbox.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_mbox.c
new file mode 100644
index 0000000..81fb929
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_mbox.c
@@ -0,0 +1,2658 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_compat.h"
+
+/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping list of static
+ * vports to be created.
+ **/
+int
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		uint16_t offset)
+{
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *mp;
+
+	mb = &pmb->u.mb;
+
+	/* Setup to dump vport info region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = offset;
+	mb->un.varDmp.region_id = DMP_REGION_VPORT;
+	mb->mbxOwner = OWN_HOST;
+
+	/* For SLI3 HBAs data is embedded in mailbox */
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		mb->un.varDmp.cv = 1;
+		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+		return 0;
+	}
+
+	/* For SLI4 HBAs the driver needs to allocate memory */
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+			"2605 lpfc_dump_static_vport: memory"
+			" allocation failed\n");
+		return 1;
+	}
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+	/* save address for completion */
+	pmb->context1 = (uint8_t *)mp;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
+
+	return 0;
+}
+
+/**
+ * lpfc_down_link - Bring down HBAs link.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares a mailbox command to bring down HBA link.
+ **/
+void
+lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb = &pmb->u.mb;
+	mb->mbxCommand = MBX_DOWN_LINK;
+	mb->mbxOwner = OWN_HOST;
+}
+
+/**
+ * lpfc_dump_mem - Prepare a mailbox command for reading a region.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset into the region.
+ * @region_id: config region id.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping HBA's config region.
+ **/
+void
+lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
+		uint16_t region_id)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->u.mb;
+	ctx = pmb->context2;
+
+	/* Setup to dump VPD region */
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = offset;
+	mb->un.varDmp.region_id = region_id;
+	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This function creates a dump memory mailbox command to dump wake up
+ * parameters.
+ */
+void
+lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->u.mb;
+	/* Save context so that we can restore after memset */
+	ctx = pmb->context2;
+
+	/* Setup to dump VPD region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->mbxOwner = OWN_HOST;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		mb->un.varDmp.entry_index = 0;
+	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
+	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	return;
+}
+
+/**
+ * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read NVRAM mailbox command returns the HBA's non-volatile parameters
+ * that are used as defaults when the Fibre Channel link is brought on-line.
+ *
+ * This routine prepares the mailbox command for reading information stored
+ * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
+ **/
+void
+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_READ_NV;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_config_async - Prepare a mailbox command for enabling HBA async event
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @ring: ring number for the asynchronous event to be configured.
+ *
+ * The asynchronous event enable mailbox command is used to enable the
+ * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
+ * specifies the default ring to which events are posted.
+ *
+ * This routine prepares the mailbox command for enabling HBA asynchronous
+ * event support on an IOCB ring.
+ **/
+void
+lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
+		uint32_t ring)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
+	mb->un.varCfgAsyncEvent.ring = ring;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_heart_beat - Prepare a mailbox command for heart beat
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The heart beat mailbox command is used to detect an unresponsive HBA, which
+ * is defined as any device where no error attention is sent and both mailbox
+ * and rings are not processed.
+ *
+ * This routine prepares the mailbox command for issuing a heart beat in the
+ * form of mailbox command to the HBA. The timely completion of the heart
+ * beat mailbox command indicates the health of the HBA.
+ **/
+void
+lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_HEARTBEAT;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @mp: DMA buffer memory for reading the link attention information into.
+ *
+ * The read topology mailbox command is issued to read the link topology
+ * information indicated by the HBA port when the Link Event bit of the Host
+ * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
+ * Attention ACQE is received from the port (For SLI-4). A Link Event
+ * Attention occurs based on an exception detected at the Fibre Channel link
+ * interface.
+ *
+ * This routine prepares the mailbox command for reading HBA link topology
+ * information. A DMA memory has been set aside and address passed to the
+ * HBA through @mp for the HBA to DMA link attention information into the
+ * memory as part of the execution of the mailbox command.
+ *
+ * Return codes
+ *    0 - Success (currently always returns 0)
+ **/
+int
+lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		   struct lpfc_dmabuf *mp)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	INIT_LIST_HEAD(&mp->list);
+	mb->mbxCommand = MBX_READ_TOPOLOGY;
+	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
+	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
+
+	/* Save address for later completion and set the owner to host so that
+	 * the FW knows this mailbox is available for processing.
+	 */
+	pmb->context1 = (uint8_t *)mp;
+	mb->mbxOwner = OWN_HOST;
+	return 0;
+}
+
+/**
+ * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The clear link attention mailbox command is issued to clear the link event
+ * attention condition indicated by the Link Event bit of the Host Attention
+ * (HSTATT) register. The link event attention condition is cleared only if
+ * the event tag specified matches that of the current link event counter.
+ * The current event tag is read using the read link attention event mailbox
+ * command.
+ *
+ * This routine prepares the mailbox command for clearing HBA link attention
+ * information.
+ **/
+void
+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varClearLA.eventTag = phba->fc_eventTag;
+	mb->mbxCommand = MBX_CLEAR_LA;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure link mailbox command is used before the initialize link
+ * mailbox command to override default value and to configure link-oriented
+ * parameters such as DID address and various timers. Typically, this
+ * command would be used after an F_Port login to set the returned DID address
+ * and the fabric timeout values. This command is not valid before a configure
+ * port command has configured the HBA port.
+ *
+ * This routine prepares the mailbox command for configuring link on a HBA.
+ **/
+void
+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	struct lpfc_vport  *vport = phba->pport;
+	MAILBOX_t *mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	/* NEW_FEATURE
+	 * SLI-2, Coalescing Response Feature.
+	 */
+	if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
+		mb->un.varCfgLnk.cr = 1;
+		mb->un.varCfgLnk.ci = 1;
+		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
+		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
+	}
+
+	mb->un.varCfgLnk.myId = vport->fc_myDID;
+	mb->un.varCfgLnk.edtov = phba->fc_edtov;
+	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
+	mb->un.varCfgLnk.ratov = phba->fc_ratov;
+	mb->un.varCfgLnk.rttov = phba->fc_rttov;
+	mb->un.varCfgLnk.altov = phba->fc_altov;
+	mb->un.varCfgLnk.crtov = phba->fc_crtov;
+	mb->un.varCfgLnk.cscn = 0;
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
+		mb->un.varCfgLnk.cscn = 1;
+		mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
+						 &phba->sli4_hba.bbscn_params);
+	}
+
+	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
+		mb->un.varCfgLnk.ack0_enable = 1;
+
+	mb->mbxCommand = MBX_CONFIG_LINK;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_config_msi - Prepare a mailbox command for configuring msi-x
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
+ * MSI-X multi-message interrupt vector association to interrupt attention
+ * conditions.
+ *
+ * Return codes
+ *    0 - Success
+ *    -EINVAL - Failure
+ **/
+int
+lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint32_t attentionConditions[2];
+
+	/* Sanity check */
+	if (phba->cfg_use_msi != 2) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0475 Not configured for supporting MSI-X "
+				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
+		return -EINVAL;
+	}
+
+	if (phba->sli_rev < 3) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0476 HBA not supporting SLI-3 or later "
+				"SLI Revision: 0x%x\n", phba->sli_rev);
+		return -EINVAL;
+	}
+
+	/* Clear mailbox command fields */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+
+	/*
+	 * SLI-3, Message Signaled Interrupt Feature.
+	 */
+
+	/* Multi-message attention configuration */
+	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
+				  HA_LATT | HA_MBATT);
+	attentionConditions[1] = 0;
+
+	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
+	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
+
+	/*
+	 * Set up message number to HA bit association
+	 */
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* RA0 (FCP Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
+	/* RA1 (Other Protocol Extra Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
+#else   /*  __LITTLE_ENDIAN_BITFIELD */
+	/* RA0 (FCP Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
+	/* RA1 (Other Protocol Extra Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
+#endif
+	/* Multi-message interrupt autoclear configuration*/
+	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
+	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
+
+	/* For now, HBA autoclear does not work reliably, disable it */
+	mb->un.varCfgMSI.autoClearHA[0] = 0;
+	mb->un.varCfgMSI.autoClearHA[1] = 0;
+
+	/* Set command and owner bit */
+	mb->mbxCommand = MBX_CONFIG_MSI;
+	mb->mbxOwner = OWN_HOST;
+
+	return 0;
+}
+
+/**
+ * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @topology: the link topology for the link to be initialized to.
+ * @linkspeed: the link speed for the link to be initialized to.
+ *
+ * The initialize link mailbox command is used to initialize the Fibre
+ * Channel link. This command must follow a configure port command that
+ * establishes the mode of operation.
+ *
+ * This routine prepares the mailbox command for initializing link on a HBA
+ * with the specified link topology and speed.
+ **/
+void
+lpfc_init_link(struct lpfc_hba * phba,
+	       LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
+{
+	lpfc_vpd_t *vpd;
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	switch (topology) {
+	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+		break;
+	case FLAGS_TOPOLOGY_MODE_PT_PT:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+		break;
+	case FLAGS_TOPOLOGY_MODE_LOOP:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+		break;
+	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+		break;
+	case FLAGS_LOCAL_LB:
+		mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+		break;
+	}
+
+	if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+		mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
+		/* Failover is not tried for Lancer G6 */
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+	}
+
+	/* Enable asynchronous ABTS responses from firmware */
+	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+
+	/* NEW_FEATURE
+	 * Setting up the link speed
+	 */
+	vpd = &phba->vpd;
+	if (vpd->rev.feaLevelHigh >= 0x02) {
+		switch (linkspeed) {
+		case LPFC_USER_LINK_SPEED_1G:
+			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
+			break;
+		case LPFC_USER_LINK_SPEED_2G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
+			break;
+		case LPFC_USER_LINK_SPEED_4G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
+			break;
+		case LPFC_USER_LINK_SPEED_8G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
+			break;
+		case LPFC_USER_LINK_SPEED_10G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
+			break;
+		case LPFC_USER_LINK_SPEED_16G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
+			break;
+		case LPFC_USER_LINK_SPEED_32G:
+			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
+			break;
+		case LPFC_USER_LINK_SPEED_AUTO:
+		default:
+			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+			break;
+		}
+	} else
+		mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+
+	mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
+	mb->mbxOwner = OWN_HOST;
+	mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
+	return;
+}
+
+/**
+ * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @vpi: virtual N_Port identifier.
+ *
+ * The read service parameter mailbox command is used to read the HBA port
+ * service parameters. The service parameters are read into the buffer
+ * specified directly by a BDE in the mailbox command. These service
+ * parameters may then be used to build the payload of an N_Port/F_Port
+ * login request and reply (LOGI/ACC).
+ *
+ * This routine prepares the mailbox command for reading HBA port service
+ * parameters. The DMA memory is allocated in this function and the addresses
+ * are populated into the mailbox command for the HBA to DMA the service
+ * parameters into.
+ *
+ * Return codes
+ *    0 - Success
+ *    1 - DMA memory allocation failed
+ **/
+int
+lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
+{
+	struct lpfc_dmabuf *mp;
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->mbxOwner = OWN_HOST;
+
+	/* Get a buffer to hold the HBA's Service Parameters */
+
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		mb->mbxCommand = MBX_READ_SPARM64;
+		/* READ_SPARAM: no buffers */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			        "0301 READ_SPARAM: no buffers\n");
+		return 1;
+	}
+	INIT_LIST_HEAD(&mp->list);
+	mb->mbxCommand = MBX_READ_SPARM64;
+	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+	if (phba->sli_rev >= LPFC_SLI_REV3)
+		mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
+
+	/* save address for completion */
+	pmb->context1 = mp;
+
+	return 0;
+}
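+
+/* Illustrative note, not driver code: the DMA buffer saved above in
+ * pmb->context1 is owned by the completion path, which is expected to
+ * release it along the lines of:
+ *
+ *	mp = (struct lpfc_dmabuf *)pmb->context1;
+ *	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ *	kfree(mp);
+ *	pmb->context1 = NULL;
+ */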
+
+/**
+ * lpfc_unreg_did - Prepare a mailbox command for unregistering DID
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregister DID mailbox command is used to unregister an N_Port/F_Port
+ * login for an unknown RPI by specifying the DID of a remote port. This
+ * command frees an RPI context in the HBA port. This has the effect of
+ * performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering a remote
+ * N_Port/F_Port (DID) login.
+ **/
+void
+lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
+	       LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varUnregDID.did = did;
+	mb->un.varUnregDID.vpi = vpi;
+	if ((vpi != 0xffff) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
+
+	mb->mbxCommand = MBX_UNREG_D_ID;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_read_config - Prepare a mailbox command for reading HBA configuration
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read configuration mailbox command is used to read the HBA port
+ * configuration parameters. This mailbox command provides a method for
+ * seeing any parameters that may have changed via various configuration
+ * mailbox commands.
+ *
+ * This routine prepares the mailbox command for reading out HBA configuration
+ * parameters.
+ **/
+void
+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->mbxCommand = MBX_READ_CONFIG;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read link status mailbox command is used to read the link status from
+ * the HBA. Link status includes all link-related error counters. These
+ * counters are maintained by the HBA and originated in the link hardware
+ * unit. Note that all of these counters wrap.
+ *
+ * This routine prepares the mailbox command for reading out HBA link status.
+ **/
+void
+lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->mbxCommand = MBX_READ_LNK_STAT;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @param: pointer to memory holding the service parameters.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @rpi: the rpi to use in the registration (usually only used for SLI4).
+ *
+ * The registration login mailbox command is used to register an N_Port or
+ * F_Port login. This registration allows the HBA to cache the remote N_Port
+ * service parameters internally and thereby make the appropriate FC-2
+ * decisions. The remote port service parameters are handed off by the driver
+ * to the HBA using a descriptor entry that directly identifies a buffer in
+ * host memory. In exchange, the HBA returns an RPI identifier.
+ *
+ * This routine prepares the mailbox command for registering remote port login.
+ * The function allocates DMA buffer for passing the service parameters to the
+ * HBA with the mailbox command.
+ *
+ * Return codes
+ *    0 - Success
+ *    1 - DMA memory allocation failed
+ **/
+int
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint8_t *sparam;
+	struct lpfc_dmabuf *mp;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varRegLogin.rpi = 0;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+	if (phba->sli_rev >= LPFC_SLI_REV3)
+		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
+	mb->un.varRegLogin.did = did;
+	mb->mbxOwner = OWN_HOST;
+	/* Get a buffer to hold the NPort's Service Parameters */
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		mb->mbxCommand = MBX_REG_LOGIN64;
+		/* REG_LOGIN: no buffers */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
+				"rpi x%x\n", vpi, did, rpi);
+		return 1;
+	}
+	INIT_LIST_HEAD(&mp->list);
+	sparam = mp->virt;
+
+	/* Copy the service parameters into the new buffer */
+	memcpy(sparam, param, sizeof (struct serv_parm));
+
+	/* save address for completion */
+	pmb->context1 = (uint8_t *) mp;
+
+	mb->mbxCommand = MBX_REG_LOGIN64;
+	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+	return 0;
+}
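+
+/* Illustrative sketch, assuming the caller already holds the remote port's
+ * service parameters (sp): a failed prepare or issue must not leak the
+ * mailbox element, e.g.:
+ *
+ *	if (lpfc_reg_rpi(phba, vpi, did, (uint8_t *)sp, mbox, rpi)) {
+ *		mempool_free(mbox, phba->mbox_mem_pool);
+ *		return -ENOMEM;
+ *	}
+ *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ */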
+
+/**
+ * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @rpi: remote port identifier
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration login mailbox command is used to unregister an N_Port
+ * or F_Port login. This command frees an RPI context in the HBA. It has the
+ * effect of performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering remote port
+ * login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
+ **/
+void
+lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
+		 LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varUnregLogin.rpi = rpi;
+	mb->un.varUnregLogin.rsvd1 = 0;
+	if (phba->sli_rev >= LPFC_SLI_REV3)
+		mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
+
+	mb->mbxCommand = MBX_UNREG_LOGIN;
+	mb->mbxOwner = OWN_HOST;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		/*
+		 * For SLI4 functions, the rpi field is overloaded for
+		 * the vport context unreg all.  This routine passes
+		 * 0 for the rpi field in lpfc_unreg_login for compatibility
+		 * with SLI3 and then overrides the rpi field with the
+		 * expected value for SLI4.
+		 */
+		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+				 mbox);
+		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(mbox, phba->mbox_mem_pool);
+	}
+}
+
+/**
+ * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
+ * @vport: pointer to the virtual N_Port data structure (carries the S_ID
+ *         in vport->fc_myDID).
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The registration vport identifier mailbox command is used to activate a
+ * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
+ * N_Port_ID against the information in the selected virtual N_Port context
+ * block and marks it active to allow normal processing of IOCB commands and
+ * received unsolicited exchanges.
+ *
+ * This routine prepares the mailbox command for registering a virtual N_Port.
+ **/
+void
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_hba *phba = vport->phba;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	/*
+	 * Set the re-reg VPI bit for f/w to update the MAC address.
+	 */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
+		mb->un.varRegVpi.upd = 1;
+
+	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
+	mb->un.varRegVpi.sid = vport->fc_myDID;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+	else
+		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
+	       sizeof(struct lpfc_name));
+	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
+	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
+
+	mb->mbxCommand = MBX_REG_VPI;
+	mb->mbxOwner = OWN_HOST;
+	return;
+
+}
+
+/**
+ * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration vport identifier mailbox command is used to inactivate
+ * a virtual N_Port. The driver must have logged out and unregistered all
+ * remote N_Ports to abort any activity on the virtual N_Port. The HBA
+ * unregisters any default RPIs associated with the specified vpi, aborting
+ * any active exchanges. The HBA will post the mailbox response after making
+ * the virtual N_Port inactive.
+ *
+ * This routine prepares the mailbox command for unregistering a virtual
+ * N_Port.
+ **/
+void
+lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	if (phba->sli_rev == LPFC_SLI_REV3)
+		mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+	else if (phba->sli_rev >= LPFC_SLI_REV4)
+		mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
+
+	mb->mbxCommand = MBX_UNREG_VPI;
+	mb->mbxOwner = OWN_HOST;
+	return;
+
+}
+
+/**
+ * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets up and initializes the IOCB rings in the Port Control
+ * Block (PCB).
+ **/
+static void
+lpfc_config_pcb_setup(struct lpfc_hba * phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	PCB_t *pcbp = phba->pcb;
+	dma_addr_t pdma_addr;
+	uint32_t offset;
+	uint32_t iocbCnt = 0;
+	int i;
+
+	pcbp->maxRing = (psli->num_rings - 1);
+
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->sli3_ring[i];
+
+		pring->sli.sli3.sizeCiocb =
+			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+		pring->sli.sli3.sizeRiocb =
+			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+		/* A ring MUST have both cmd and rsp entries defined to
+		 * be valid
+		 */
+		if ((pring->sli.sli3.numCiocb == 0) ||
+			(pring->sli.sli3.numRiocb == 0)) {
+			pcbp->rdsc[i].cmdEntries = 0;
+			pcbp->rdsc[i].rspEntries = 0;
+			pcbp->rdsc[i].cmdAddrHigh = 0;
+			pcbp->rdsc[i].rspAddrHigh = 0;
+			pcbp->rdsc[i].cmdAddrLow = 0;
+			pcbp->rdsc[i].rspAddrLow = 0;
+			pring->sli.sli3.cmdringaddr = NULL;
+			pring->sli.sli3.rspringaddr = NULL;
+			continue;
+		}
+		/* Command ring setup for ring */
+		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
+		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
+
+		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
+			 (uint8_t *) phba->slim2p.virt;
+		pdma_addr = phba->slim2p.phys + offset;
+		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
+		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
+		iocbCnt += pring->sli.sli3.numCiocb;
+
+		/* Response ring setup for ring */
+		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
+
+		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
+		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
+			 (uint8_t *)phba->slim2p.virt;
+		pdma_addr = phba->slim2p.phys + offset;
+		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
+		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
+		iocbCnt += pring->sli.sli3.numRiocb;
+	}
+}
+
+/**
+ * lpfc_read_rev - Prepare a mailbox command for reading HBA revision
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read revision mailbox command is used to read the revision levels of
+ * the HBA components. These components include hardware units, resident
+ * firmware, and available firmware. HBAs that support the SLI-3 mode of
+ * operation provide different response information depending on the version
+ * requested by the driver.
+ *
+ * This routine prepares the mailbox command for reading HBA revision
+ * information.
+ **/
+void
+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->un.varRdRev.cv = 1;
+	mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
+	mb->mbxCommand = MBX_READ_REV;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+void
+lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_mqe *mqe;
+
+	switch (mb->mbxCommand) {
+	case  MBX_READ_REV:
+		mqe = &pmb->u.mqe;
+		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
+				 mqe->un.read_rev.fw_name, 16);
+		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
+				 mqe->un.read_rev.ulp_fw_name, 16);
+		break;
+	default:
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
+ * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test using the fields in the Selection Profile 2
+ * extension in words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
+			struct lpfc_hbq_init  *hbq_desc)
+{
+	hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
+	hbqmb->profiles.profile2.maxlen     = hbq_desc->maxlen;
+	hbqmb->profiles.profile2.seqlenoff  = hbq_desc->seqlenoff;
+}
+
+/**
+ * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
+ * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test and Byte Field Test using the fields in the
+ * Selection Profile 3 extension in words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
+			struct lpfc_hbq_init  *hbq_desc)
+{
+	hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
+	hbqmb->profiles.profile3.maxlen     = hbq_desc->maxlen;
+	hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
+	hbqmb->profiles.profile3.seqlenoff  = hbq_desc->seqlenoff;
+	memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
+	       sizeof(hbqmb->profiles.profile3.cmdmatch));
+}
+
+/**
+ * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
+ * HBA tests the initial frame of an incoming sequence using the frame's
+ * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
+ * and Byte Field Test using the fields in the Selection Profile 5 extension
+ * words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
+			struct lpfc_hbq_init  *hbq_desc)
+{
+	hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
+	hbqmb->profiles.profile5.maxlen     = hbq_desc->maxlen;
+	hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
+	hbqmb->profiles.profile5.seqlenoff  = hbq_desc->seqlenoff;
+	memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
+	       sizeof(hbqmb->profiles.profile5.cmdmatch));
+}
+
+/**
+ * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
+ * @phba: pointer to lpfc hba data structure.
+ * @id: HBQ identifier.
+ * @hbq_desc: pointer to the HBQ descriptor data structure.
+ * @hbq_entry_index: index of the HBQ entry data structures.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
+ * an HBQ. The configuration binds events that require buffers to a particular
+ * ring and HBQ based on a selection profile.
+ *
+ * This routine prepares the mailbox command for configuring an HBQ.
+ **/
+void
+lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
+		 struct lpfc_hbq_init *hbq_desc,
+		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
+{
+	int i;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	hbqmb->hbqId = id;
+	hbqmb->entry_count = hbq_desc->entry_count;   /* # entries in HBQ */
+	hbqmb->recvNotify = hbq_desc->rn;             /* Receive
+						       * Notification */
+	hbqmb->numMask    = hbq_desc->mask_count;     /* # R_CTL/TYPE masks
+						       * # in words 0-19 */
+	hbqmb->profile    = hbq_desc->profile;	      /* Selection profile:
+						       * 0 = all,
+						       * 7 = logentry */
+	hbqmb->ringMask   = hbq_desc->ring_mask;      /* Binds HBQ to a ring
+						       * e.g. Ring0=b0001,
+						       * ring2=b0100 */
+	hbqmb->headerLen  = hbq_desc->headerLen;      /* 0 if not profile 4
+						       * or 5 */
+	hbqmb->logEntry   = hbq_desc->logEntry;       /* Set to 1 if this
+						       * HBQ will be used
+						       * for LogEntry
+						       * buffers */
+	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
+		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
+	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
+
+	mb->mbxCommand = MBX_CONFIG_HBQ;
+	mb->mbxOwner = OWN_HOST;
+
+	/* Copy info for profiles 2, 3 and 5. For other profiles
+	 * this area is reserved.
+	 */
+	if (hbq_desc->profile == 2)
+		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
+	else if (hbq_desc->profile == 3)
+		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
+	else if (hbq_desc->profile == 5)
+		lpfc_build_hbq_profile5(hbqmb, hbq_desc);
+
+	/* Return if no rctl / type masks for this HBQ */
+	if (!hbq_desc->mask_count)
+		return;
+
+	/* Otherwise we setup specific rctl / type masks for this HBQ */
+	for (i = 0; i < hbq_desc->mask_count; i++) {
+		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
+		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
+		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
+		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
+ * @phba: pointer to lpfc hba data structure.
+ * @ring: ring number to configure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure ring mailbox command is used to configure an IOCB ring. This
+ * configuration binds from one to six HBA R_CTL/TYPE mask entries to the
+ * ring. This is used to map incoming sequences to a particular ring whose
+ * R_CTL/TYPE mask entry matches that of the sequence. The driver should not
+ * attempt to configure a ring whose number is greater than the number
+ * specified in the Port Control Block (PCB). It is an error to issue the
+ * configure ring command more than once with the same ring number. The HBA
+ * returns an error if the driver attempts this.
+ *
+ * This routine prepares the mailbox command for configuring IOCB ring.
+ **/
+void
+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
+{
+	int i;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varCfgRing.ring = ring;
+	mb->un.varCfgRing.maxOrigXchg = 0;
+	mb->un.varCfgRing.maxRespXchg = 0;
+	mb->un.varCfgRing.recvNotify = 1;
+
+	psli = &phba->sli;
+	pring = &psli->sli3_ring[ring];
+	mb->un.varCfgRing.numMask = pring->num_mask;
+	mb->mbxCommand = MBX_CONFIG_RING;
+	mb->mbxOwner = OWN_HOST;
+
+	/* Is this ring configured for a specific profile */
+	if (pring->prt[0].profile) {
+		mb->un.varCfgRing.profile = pring->prt[0].profile;
+		return;
+	}
+
+	/* Otherwise we setup specific rctl / type masks for this ring */
+	for (i = 0; i < pring->num_mask; i++) {
+		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
+		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
+			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
+		else
+			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
+		mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
+		mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_config_port - Prepare a mailbox command for configuring port
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure port mailbox command is used to identify the Port Control
+ * Block (PCB) in the driver memory. After this command is issued, the
+ * driver must not access the mailbox in the HBA without first resetting
+ * the HBA. The HBA may copy the PCB information to internal storage for
+ * subsequent use; the driver cannot change the PCB information unless it
+ * resets the HBA.
+ *
+ * This routine prepares the mailbox command for configuring port.
+ **/
+void
+lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
+	MAILBOX_t *mb = &pmb->u.mb;
+	dma_addr_t pdma_addr;
+	uint32_t bar_low, bar_high;
+	size_t offset;
+	struct lpfc_hgp hgp;
+	int i;
+	uint32_t pgp_offset;
+
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_CONFIG_PORT;
+	mb->mbxOwner = OWN_HOST;
+
+	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
+
+	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
+	pdma_addr = phba->slim2p.phys + offset;
+	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
+	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+
+	/* The Host Group Pointer is always placed in SLIM */
+	mb->un.varCfgPort.hps = 1;
+
+	/* If the HBA supports SLI-3, ask for it */
+
+	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
+		if (phba->cfg_enable_bg)
+			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+		if (phba->cfg_enable_dss)
+			mb->un.varCfgPort.cdss = 1; /* Configure Security */
+		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
+		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
+		if (phba->max_vpi && phba->cfg_enable_npiv &&
+		    phba->vpd.sli3Feat.cmv) {
+			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
+			mb->un.varCfgPort.cmv = 1;
+		} else
+			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
+	} else
+		phba->sli_rev = LPFC_SLI_REV2;
+	mb->un.varCfgPort.sli_mode = phba->sli_rev;
+
+	/* If this is an SLI3 port, configure async status notification. */
+	if (phba->sli_rev == LPFC_SLI_REV3)
+		mb->un.varCfgPort.casabt = 1;
+
+	/* Now setup pcb */
+	phba->pcb->type = TYPE_NATIVE_SLI2;
+	phba->pcb->feature = FEATURE_INITIAL_SLI2;
+
+	/* Setup Mailbox pointers */
+	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
+	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
+	pdma_addr = phba->slim2p.phys + offset;
+	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
+	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
+
+	/*
+	 * Setup Host Group ring pointer.
+	 *
+	 * For efficiency reasons, the ring get/put pointers can be
+	 * placed in adapter memory (SLIM) rather than in host memory.
+	 * This allows firmware to avoid PCI reads/writes when updating
+	 * and checking pointers.
+	 *
+	 * The firmware recognizes the use of SLIM memory by comparing
+	 * the address of the get/put pointers structure with that of
+	 * the SLIM BAR (BAR0).
+	 *
+	 * Caution: be sure to use the PCI config space value of BAR0/BAR1
+	 * (the hardware's view of the base address), not the OS's
+	 * value of pci_resource_start() as the OS value may be a cookie
+	 * for ioremap/iomap.
+	 */
+
+	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
+	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+
+	/*
+	 * Set up HGP - Port Memory
+	 *
+	 * The port expects the host get/put pointers to reside in memory
+	 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
+	 * area of SLIM.  In SLI-2 mode, there's an additional 16 reserved
+	 * words (0x40 bytes).  This area is not reserved if HBQs are
+	 * configured in SLI-3.
+	 *
+	 * CR0Put    - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
+	 * RR0Get                      0xc4              0x84
+	 * CR1Put                      0xc8              0x88
+	 * RR1Get                      0xcc              0x8c
+	 * CR2Put                      0xd0              0x90
+	 * RR2Get                      0xd4              0x94
+	 * CR3Put                      0xd8              0x98
+	 * RR3Get                      0xdc              0x9c
+	 *
+	 * Reserved                    0xa0-0xbf
+	 *    If HBQs configured:
+	 *                         HBQ 0 Put ptr  0xc0
+	 *                         HBQ 1 Put ptr  0xc4
+	 *                         HBQ 2 Put ptr  0xc8
+	 *                         ......
+	 *                         HBQ(M-1)Put Pointer 0xc0+(M-1)*4
+	 *
+	 */
+
+	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
+		phba->host_gp = &phba->mbox->us.s2.host[0];
+		phba->hbq_put = NULL;
+		offset = (uint8_t *)&phba->mbox->us.s2.host -
+			(uint8_t *)phba->slim2p.virt;
+		pdma_addr = phba->slim2p.phys + offset;
+		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
+		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
+	} else {
+		/* The Host Group Pointer is always placed in SLIM */
+		mb->un.varCfgPort.hps = 1;
+
+		if (phba->sli_rev == 3) {
+			phba->host_gp = &mb_slim->us.s3.host[0];
+			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+		} else {
+			phba->host_gp = &mb_slim->us.s2.host[0];
+			phba->hbq_put = NULL;
+		}
+
+		/* mask off BAR0's flag bits 0 - 3 */
+		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+			(void __iomem *)phba->host_gp -
+			(void __iomem *)phba->MBslimaddr;
+		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+			phba->pcb->hgpAddrHigh = bar_high;
+		else
+			phba->pcb->hgpAddrHigh = 0;
+		/* write HGP data to SLIM at the required longword offset */
+		memset(&hgp, 0, sizeof(struct lpfc_hgp));
+
+		for (i = 0; i < phba->sli.num_rings; i++) {
+			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+				    sizeof(*phba->host_gp));
+		}
+	}
+
+	/* Setup Port Group offset */
+	if (phba->sli_rev == 3)
+		pgp_offset = offsetof(struct lpfc_sli2_slim,
+				      mbx.us.s3_pgp.port);
+	else
+		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
+	pdma_addr = phba->slim2p.phys + pgp_offset;
+	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
+	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
+
+	/* Use the helper routine to set up the rings in the PCB */
+	lpfc_config_pcb_setup(phba);
+
+	/* special handling for LC HBAs */
+	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+		uint32_t hbainit[5];
+
+		lpfc_hba_init(phba, hbainit);
+
+		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
+	}
+
+	/* Swap PCB if needed */
+	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
+}
+
+/**
+ * lpfc_kill_board - Prepare a mailbox command for killing board
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The kill board mailbox command is used to tell firmware to perform a
+ * graceful shutdown of a channel on a specified board to prepare for reset.
+ * When the kill board mailbox command is received, the ER3 bit is set to 1
+ * in the Host Status register and the ER Attention bit is set to 1 in the
+ * Host Attention register of the HBA function that received the kill board
+ * command.
+ *
+ * This routine prepares the mailbox command for killing the board in
+ * preparation for a graceful shutdown.
+ **/
+void
+lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_KILL_BOARD;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put onto the tail of
+ * this queue so that commands are processed in order, as the HBA can process
+ * only one mailbox command at a time.
+ **/
+void
+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+	struct lpfc_sli *psli;
+
+	psli = &phba->sli;
+
+	list_add_tail(&mbq->list, &psli->mboxq);
+
+	psli->mboxq_cnt++;
+
+	return;
+}
+
+/**
+ * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list, so that commands are processed in order, as the HBA can
+ * process only one mailbox command at a time. After the HBA finishes
+ * processing a mailbox command, the driver removes the next pending mailbox
+ * command from the head of the queue and sends it to the HBA for processing.
+ *
+ * Return codes
+ *    pointer to the driver internal queue element for mailbox command.
+ **/
+LPFC_MBOXQ_t *
+lpfc_mbox_get(struct lpfc_hba * phba)
+{
+	LPFC_MBOXQ_t *mbq = NULL;
+	struct lpfc_sli *psli = &phba->sli;
+
+	list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
+	if (mbq)
+		psli->mboxq_cnt--;
+
+	return mbq;
+}
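+
+/* Illustrative pairing of the two queue helpers above (a sketch; actual
+ * callers serialize access with the appropriate SLI lock):
+ *
+ *	lpfc_mbox_put(phba, mbq);		queue a pending command
+ *	...
+ *	mbq = lpfc_mbox_get(phba);		dequeue when the HBA is idle
+ *	if (mbq)
+ *		post mbq to the HBA mailbox
+ */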
+
+/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
+/**
+ * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	unsigned long iflag;
+
+	/* This function expects to be called from interrupt context */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	__lpfc_mbox_cmpl_put(phba, mbq);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/**
+ * lpfc_mbox_cmd_check - Check the validity of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine checks whether a mailbox command is valid to be issued. The
+ * check is performed by the mailbox issue API before a client's mailbox
+ * command is posted to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	/* Mailbox commands that have a completion handler must also have a
+	 * vport specified.
+	 */
+	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+		if (!mboxq->vport) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+					"1814 Mbox x%x failed, no vport\n",
+					mboxq->u.mb.mbxCommand);
+			dump_stack();
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API when it is about
+ * to post a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+	/* If the PCI channel is in offline state, do not issue mbox */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -ENODEV;
+
+	/* If the HBA is in error state, do not issue mbox */
+	if (phba->link_state == LPFC_HBA_ERROR)
+		return -ENODEV;
+
+	return 0;
+}
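+
+/* Illustrative sketch: an issue path is expected to gate on both checks
+ * above before posting a command:
+ *
+ *	rc = lpfc_mbox_dev_check(phba);
+ *	if (!rc)
+ *		rc = lpfc_mbox_cmd_check(phba, mboxq);
+ *	if (rc)
+ *		return rc;	(-ENODEV: device or command not ready)
+ */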
+
+/**
+ * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine retrieves the proper timeout value according to the mailbox
+ * command code.
+ *
+ * Return codes
+ *    Timeout value to be used for the given mailbox command
+ **/
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	MAILBOX_t *mbox = &mboxq->u.mb;
+	uint8_t subsys, opcode;
+
+	switch (mbox->mbxCommand) {
+	case MBX_WRITE_NV:	/* 0x03 */
+	case MBX_DUMP_MEMORY:	/* 0x17 */
+	case MBX_UPDATE_CFG:	/* 0x1B */
+	case MBX_DOWN_LOAD:	/* 0x1C */
+	case MBX_DEL_LD_ENTRY:	/* 0x1D */
+	case MBX_WRITE_VPARMS:	/* 0x32 */
+	case MBX_LOAD_AREA:	/* 0x81 */
+	case MBX_WRITE_WWN:     /* 0x98 */
+	case MBX_LOAD_EXP_ROM:	/* 0x9C */
+	case MBX_ACCESS_VDATA:	/* 0xA5 */
+		return LPFC_MBOX_TMO_FLASH_CMD;
+	case MBX_SLI4_CONFIG:	/* 0x9b */
+		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
+		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
+		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
+			switch (opcode) {
+			case LPFC_MBOX_OPCODE_READ_OBJECT:
+			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
+			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
+			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
+			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
+			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
+			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
+			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
+			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
+			case LPFC_MBOX_OPCODE_RESET_LICENSES:
+			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
+			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
+			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
+				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+			}
+		}
+		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
+			switch (opcode) {
+			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
+				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+			}
+		}
+		return LPFC_MBOX_SLI4_CONFIG_TMO;
+	}
+	return LPFC_MBOX_TMO;
+}
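+
+/* Illustrative use of the value returned above: callers treat it as a
+ * timeout in seconds and convert it to jiffies when arming the mailbox
+ * timer, e.g.:
+ *
+ *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) * 1000);
+ *	mod_timer(&phba->sli.mbox_tmo, jiffies + timeout);
+ */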
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      dma_addr_t phyaddr, uint32_t length)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+	nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      struct lpfc_mbx_sge *sge)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+	sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees SLI4 specific mailbox command for sending IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	struct lpfc_mbx_sge sge;
+	dma_addr_t phyaddr;
+	uint32_t sgecount, sgentry;
+
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, just free the mbox command */
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+
+	/* For non-embedded mbox command, we need to free the pages first */
+	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+	/* There is nothing we can do if there is no sge address array */
+	if (unlikely(!mbox->sge_array)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+	/* Each non-embedded DMA memory was allocated in the length of a page */
+	for (sgentry = 0; sgentry < sgecount; sgentry++) {
+		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  mbox->sge_array->addr[sgentry], phyaddr);
+	}
+	/* Free the sge address array memory */
+	kfree(mbox->sge_array);
+	/* Finally, free the mailbox command itself */
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the  SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
+ *
+ * This routine sets up the header fields of SLI4 specific mailbox command
+ * for sending IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ *         for non-embedded mailbox commands).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+	struct lpfc_mbx_sli4_config *sli4_config;
+	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+	uint32_t alloc_len;
+	uint32_t resid_len;
+	uint32_t pagen, pcount;
+	void *viraddr;
+	dma_addr_t phyaddr;
+
+	/* Set up SLI4 mailbox command header fields */
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+	/* Set up SLI4 ioctl command header fields */
+	sli4_config = &mbox->u.mqe.un.sli4_config;
+
+	/* Setup for the embedded mbox command */
+	if (emb) {
+		/* Set up main header fields */
+		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+		sli4_config->header.cfg_mhdr.payload_length = length;
+		/* Set up sub-header fields following main header */
+		bf_set(lpfc_mbox_hdr_opcode,
+			&sli4_config->header.cfg_shdr.request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem,
+			&sli4_config->header.cfg_shdr.request, subsystem);
+		sli4_config->header.cfg_shdr.request.request_length =
+			length - LPFC_MBX_CMD_HDR_LENGTH;
+		return length;
+	}
+
+	/* Setup for the non-embedded mbox command */
+	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
+	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+	/* Allocate record for keeping SGE virtual addresses */
+	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+				  GFP_KERNEL);
+	if (!mbox->sge_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2527 Failed to allocate non-embedded SGE "
+				"array.\n");
+		return 0;
+	}
+	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+		/* The DMA memory is always allocated in the length of a
+		 * page even though the last SGE might not fill up to a
+		 * page. SLI4_PAGE_SIZE is then used as the a priori size
+		 * for the later DMA memory free.
+		 */
+		viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
+					      SLI4_PAGE_SIZE, &phyaddr,
+					      GFP_KERNEL);
+		/* In case of malloc fails, proceed with whatever we have */
+		if (!viraddr)
+			break;
+		mbox->sge_array->addr[pagen] = viraddr;
+		/* Keep the first page for later sub-header construction */
+		if (pagen == 0)
+			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+		resid_len = length - alloc_len;
+		if (resid_len > SLI4_PAGE_SIZE) {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      SLI4_PAGE_SIZE);
+			alloc_len += SLI4_PAGE_SIZE;
+		} else {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      resid_len);
+			alloc_len = length;
+		}
+	}
+
+	/* Set up main header fields in mailbox command */
+	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+	/* Set up sub-header fields into the first page */
+	if (pagen > 0) {
+		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+		cfg_shdr->request.request_length =
+				alloc_len - sizeof(union  lpfc_sli4_cfg_shdr);
+	}
+	/* The sub-header is in DMA memory, which needs endian conversion */
+	if (cfg_shdr)
+		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+				      sizeof(union  lpfc_sli4_cfg_shdr));
+	return alloc_len;
+}
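+
+/* Illustrative sketch of the embedded form (struct lpfc_mbx_xxx stands in
+ * for a real sub-command structure): the length passed in covers the
+ * sub-command body plus its sub-header, e.g.:
+ *
+ *	length = sizeof(struct lpfc_mbx_xxx) -
+ *		 sizeof(struct lpfc_sli4_cfg_mhdr);
+ *	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ *			 opcode, length, LPFC_SLI4_MBX_EMBED);
+ *
+ * Non-embedded commands must later be released with
+ * lpfc_sli4_mbox_cmd_free() so the per-page DMA memory and the sge_array
+ * are freed along with the mailbox element.
+ */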
+
+/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands.  It is called after lpfc_sli4_config.  The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: the actual length of the mbox command allocated.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+	uint8_t opcode = 0;
+	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+	void *virtaddr = NULL;
+
+	/* Set up SLI4 ioctl command header fields */
+	if (emb == LPFC_SLI4_MBX_NEMBED) {
+		/* Get the first SGE entry from the non-embedded DMA memory */
+		virtaddr = mbox->sge_array->addr[0];
+		if (virtaddr == NULL)
+			return 1;
+		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+	}
+
+	/*
+	 * The resource type is common to all extent Opcodes and resides in the
+	 * same position.
+	 */
+	if (emb == LPFC_SLI4_MBX_EMBED)
+		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+		       rsrc_type);
+	else {
+		/* This is DMA data.  Byteswap is required. */
+		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+		       n_rsrc_extnt, rsrc_type);
+		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+				      &n_rsrc_extnt->word4,
+				      sizeof(uint32_t));
+	}
+
+	/* Complete the initialization for the particular Opcode. */
+	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
+	switch (opcode) {
+	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+		if (emb == LPFC_SLI4_MBX_EMBED)
+			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+			       exts_count);
+		else
+			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+			       n_rsrc_extnt, exts_count);
+		break;
+	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+		/* Initialization is complete.*/
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2929 Resource Extent Opcode x%x is "
+				"unsupported\n", opcode);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
+ * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
+ * be returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+		return LPFC_MBOX_SUBSYSTEM_NA;
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, get subsystem from embedded sub-header */
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+		return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+	}
+
+	/* For non-embedded mbox command, get subsystem from first dma page */
+	if (unlikely(!mbox->sge_array))
+		return LPFC_MBOX_SUBSYSTEM_NA;
+	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+	return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
+ * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) shall be
+ * returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+		return LPFC_MBOX_OPCODE_NA;
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, get opcode from embedded sub-header*/
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+		return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+	}
+
+	/* For non-embedded mbox command, get opcode from first dma page */
+	if (unlikely(!mbox->sge_array))
+		return LPFC_MBOX_OPCODE_NA;
+	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+	return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ * @fcf_index: index to fcf table.
+ *
+ * This routine allocates and constructs the non-embedded mailbox command
+ * for reading the FCF table entry referred to by @fcf_index.
+ *
+ * Return: 0 if successful, -ENOMEM if the mailbox element or its
+ * non-embedded DMA memory could not be allocated.
+ **/
+int
+lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
+			   struct lpfcMboxq *mboxq,
+			   uint16_t fcf_index)
+{
+	void *virt_addr;
+	uint8_t *bytep;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+	if (!mboxq)
+		return -ENOMEM;
+
+	req_len = sizeof(struct fcf_record) +
+		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+			LPFC_SLI4_MBX_NEMBED);
+
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"0291 Allocated DMA memory size (x%x) is "
+				"less than the requested DMA memory "
+				"size (x%x)\n", alloc_len, req_len);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	virt_addr = mboxq->sge_array->addr[0];
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+	/* Set up command fields */
+	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+	/* Perform necessary endian conversion */
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+
+	return 0;
+}
+
+/**
+ * lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+	/* Set up SLI4 mailbox command header fields */
+	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+	/* Set up host requested features. */
+	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+	bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Enable DIF (block guard) only if configured to do so. */
+	if (phba->cfg_enable_bg)
+		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Enable NPIV only if configured to do so. */
+	if (phba->max_vpi && phba->cfg_enable_npiv)
+		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	if (phba->nvmet_support) {
+		bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
+		/* iaab/iaar NOT set for now */
+		bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
+		bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
+	}
+	return;
+}
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to setup a VFI
+ * before issuing a FLOGI to login to the VSAN. The driver should also issue a
+ * REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+	struct lpfc_mbx_init_vfi *init_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	mbox->vport = vport;
+	init_vfi = &mbox->u.mqe.un.init_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vfi, init_vfi,
+	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+	bf_set(lpfc_init_vfi_vpi, init_vfi,
+	       vport->phba->vpi_ids[vport->vpi]);
+	bf_set(lpfc_init_vfi_fcfi, init_vfi,
+	       vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, and uses @buf as a DMAable buffer to send the vport's
+ * fc service parameters to the HBA for this VFI. REG_VFI configures virtual
+ * fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+	struct lpfc_mbx_reg_vfi *reg_vfi;
+	struct lpfc_hba *phba = vport->phba;
+	uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_vfi = &mbox->u.mqe.un.reg_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+	       phba->sli4_hba.vfi_ids[vport->vfi]);
+	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
+	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
+	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
+	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
+	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+	reg_vfi->e_d_tov = phba->fc_edtov;
+	reg_vfi->r_a_tov = phba->fc_ratov;
+	if (phys) {
+		reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+		reg_vfi->bde.addrLow = putPaddrLow(phys);
+		reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+		reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	}
+	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+
+	/* Only FC supports upd bit */
+	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
+	    (vport->fc_flag & FC_VFI_REGISTERED) &&
+	    (!phba->fc_topology_changed)) {
+		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
+		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
+	}
+
+	bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
+	bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
+	bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;
+
+	if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+	    bbscn_fabric != 0) {
+		bbscn_max = bf_get(lpfc_bbscn_max,
+				   &phba->sli4_hba.bbscn_params);
+		if (bbscn_fabric <= bbscn_max) {
+			bbscn_def = bf_get(lpfc_bbscn_def,
+					   &phba->sli4_hba.bbscn_params);
+
+			if (bbscn_fabric > bbscn_def)
+				bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
+				       bbscn_fabric);
+			else
+				bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);
+
+			bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
+		}
+	}
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+			"3134 Register VFI, mydid:x%x, fcfi:%d, "
+			"vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x "
+			"port_state:x%x topology chg:%d bbscn_fabric:%d\n",
+			vport->fc_myDID,
+			phba->fcf.fcfi,
+			phba->sli4_hba.vfi_ids[vport->vfi],
+			phba->vpi_ids[vport->vpi],
+			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+			vport->port_state, phba->fc_topology_changed,
+			bbscn_fabric);
+}
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @phba: pointer to the hba structure to init the VPI for.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N_Port. The SLI Host issues this command before issuing a
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual N_Port login.
+ **/
+void
+lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
+	       phba->vpi_ids[vpi]);
+	bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
+	       phba->sli4_hba.vfi_ids[phba->pport->vfi]);
+}
+
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
+	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+}
+
+/**
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
+ * @phba: pointer to the hba structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 dump mailbox command to dump config
+ * region 23.
+ *
+ * Return: 0 if successful, 1 if the DMA memory allocation fails.
+ **/
+int
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_dmabuf *mp = NULL;
+	MAILBOX_t *mb;
+
+	memset(mbox, 0, sizeof(*mbox));
+	mb = &mbox->u.mb;
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		/* dump config region 23 failed to allocate memory */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			"2569 lpfc dump config region 23: memory"
+			" allocation failed\n");
+		return 1;
+	}
+
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+
+	/* save address for completion */
+	mbox->context1 = (uint8_t *) mp;
+
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.region_id = DMP_REGION_23;
+	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	return 0;
+}
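+
+/*
+ * Illustrative usage sketch (editorial, not part of the driver): after this
+ * routine returns 0, the caller issues the mailbox and reads the region 23
+ * data out of the DMA buffer saved in context1:
+ *
+ *	if (!lpfc_sli4_dump_cfg_rg23(phba, mbox) &&
+ *	    lpfc_sli_issue_mbox_wait(phba, mbox, 30) == MBX_SUCCESS)
+ *		mp = (struct lpfc_dmabuf *)mbox->context1;
+ *
+ * at which point mp->virt holds up to DMP_RGN23_SIZE bytes of region 23
+ * data. The 30 above is a hypothetical timeout, in seconds.
+ */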
+
+static void
+lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	MAILBOX_t *mb;
+	int rc = FAILURE;
+	struct lpfc_rdp_context *rdp_context =
+			(struct lpfc_rdp_context *)(mboxq->context2);
+
+	mb = &mboxq->u.mb;
+	if (mb->mbxStatus)
+		goto mbx_failed;
+
+	memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));
+
+	rc = SUCCESS;
+
+mbx_failed:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	rdp_context->cmpl(phba, rdp_context, rc);
+}
+
+static void
+lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
+	struct lpfc_rdp_context *rdp_context =
+			(struct lpfc_rdp_context *)(mbox->context2);
+
+	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
+		goto error_mbuf_free;
+
+	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
+				DMP_SFF_PAGE_A2_SIZE);
+
+	/* We don't need dma buffer for link stat. */
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+
+	memset(mbox, 0, sizeof(*mbox));
+	lpfc_read_lnk_stat(phba, mbox);
+	mbox->vport = rdp_context->ndlp->vport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
+	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
+		goto error_cmd_free;
+
+	return;
+
+error_mbuf_free:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+error_cmd_free:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	rdp_context->cmpl(phba, rdp_context, FAILURE);
+}
+
+void
+lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	int rc;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1);
+	struct lpfc_rdp_context *rdp_context =
+			(struct lpfc_rdp_context *)(mbox->context2);
+
+	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
+		goto error;
+
+	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
+				DMP_SFF_PAGE_A0_SIZE);
+
+	memset(mbox, 0, sizeof(*mbox));
+
+	memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+
+	/* save address for completion */
+	mbox->context1 = mp;
+	mbox->vport = rdp_context->ndlp->vport;
+
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
+	bf_set(lpfc_mbx_memory_dump_type3_type,
+		&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
+	bf_set(lpfc_mbx_memory_dump_type3_link,
+		&mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
+	bf_set(lpfc_mbx_memory_dump_type3_page_no,
+		&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
+	bf_set(lpfc_mbx_memory_dump_type3_length,
+		&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
+	mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
+	mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
+
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
+	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		goto error;
+
+	return;
+
+error:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	rdp_context->cmpl(phba, rdp_context, FAILURE);
+}
+
+
+/**
+ * lpfc_sli4_dump_page_a0 - Dump sli4 read SFP Diagnostic.
+ * @phba: pointer to the hba structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 dump mailbox command to dump config
+ * type 3 page 0xA0.
+ *
+ * Return: 0 if successful, 1 if the DMA memory allocation fails.
+ **/
+int
+lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_dmabuf *mp = NULL;
+
+	memset(mbox, 0, sizeof(*mbox));
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			"3569 dump type 3 page 0xA0 allocation failed\n");
+		return 1;
+	}
+
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
+	/* save address for completion */
+	mbox->context1 = mp;
+
+	bf_set(lpfc_mbx_memory_dump_type3_type,
+		&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
+	bf_set(lpfc_mbx_memory_dump_type3_link,
+		&mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
+	bf_set(lpfc_mbx_memory_dump_type3_page_no,
+		&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
+	bf_set(lpfc_mbx_memory_dump_type3_length,
+		&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
+	mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
+	mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
+
+	return 0;
+}
+
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command is also
+ * used to indicate where received unsolicited frames from this FCF will be
+ * sent. By default this routine sets up the FCF to forward all unsolicited
+ * frames to the RQ ID passed in @phba. This can be overridden by the caller
+ * for more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+	if (phba->nvmet_support == 0) {
+		bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
+		       phba->sli4_hba.hdr_rq->queue_id);
+		/* Match everything - rq_id0 */
+		bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
+		bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
+		bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
+		bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);
+
+		bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+
+		/* addr mode is the bitwise inverted value of fcf addr_mode */
+		bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+		       (~phba->fcf.addr_mode) & 0x3);
+	} else {
+		/* This is ONLY for NVMET MRQ == 1 */
+		if (phba->cfg_nvmet_mrq != 1)
+			return;
+
+		bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
+		       phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
+		/* Match type FCP - rq_id0 */
+		bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
+		bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
+		bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
+		       FC_RCTL_DD_UNSOL_CMD);
+
+		bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
+		       phba->sli4_hba.hdr_rq->queue_id);
+		/* Match everything else - rq_id1 */
+		bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
+		bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
+		bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
+		bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
+	}
+	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
+	       phba->fcf.current_rec.fcf_indx);
+	if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
+		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
+		       phba->fcf.current_rec.vlan_id);
+	}
+}
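+
+/*
+ * Illustrative usage sketch (editorial, not part of the driver): a caller
+ * needing a more elaborate setup can adjust the ring filters after the
+ * defaults are applied, e.g. steering unsolicited FCP commands to a second
+ * receive queue:
+ *
+ *	lpfc_reg_fcfi(phba, mbox);
+ *	bf_set(lpfc_reg_fcfi_rq_id1, &mbox->u.mqe.un.reg_fcfi, my_rq_id);
+ *	bf_set(lpfc_reg_fcfi_type_match1, &mbox->u.mqe.un.reg_fcfi,
+ *	       FC_TYPE_FCP);
+ *	bf_set(lpfc_reg_fcfi_type_mask1, &mbox->u.mqe.un.reg_fcfi, 0xff);
+ *
+ * where my_rq_id is a hypothetical caller-owned receive queue id.
+ */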
+
+/**
+ * lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @mode: 0 to register FCFI, 1 to register MRQs
+ *
+ * The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command is also
+ * used to indicate where received unsolicited frames from this FCF will be
+ * sent. By default this routine sets up the FCF to forward all unsolicited
+ * frames to the RQ ID passed in @phba. This can be overridden by the caller
+ * for more complicated setups.
+ **/
+void
+lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
+{
+	struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;
+
+	/* This is ONLY for MRQ */
+	if (phba->cfg_nvmet_mrq <= 1)
+		return;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
+	if (mode == 0) {
+		bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
+		       phba->fcf.current_rec.fcf_indx);
+		if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
+			bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
+			bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
+			       phba->fcf.current_rec.vlan_id);
+		}
+		return;
+	}
+
+	bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
+	       phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
+	/* Match NVME frames of type FCP (protocol NVME) - rq_id0 */
+	bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
+	bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
+	bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
+	bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
+	bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
+	bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);
+
+	bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */
+	bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
+	bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */
+	bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);
+
+	bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
+	       phba->sli4_hba.hdr_rq->queue_id);
+	/* Match everything - rq_id1 */
+	bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
+	bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
+	bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
+	bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);
+
+	bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+}
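+
+/*
+ * Illustrative usage sketch (editorial, not part of the driver): because
+ * REG_FCFI_MRQ carries both FCFI and MRQ information, this routine is
+ * invoked on two separate passes, once per @mode:
+ *
+ *	lpfc_reg_fcfi_mrq(phba, mbox, 0);	(FCFI / VLAN fields first)
+ *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ *	...
+ *	lpfc_reg_fcfi_mrq(phba, mbox, 1);	(then the MRQ filters)
+ *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ */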
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+	bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = ndlp->phba;
+	struct lpfc_mbx_resume_rpi *resume_rpi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	resume_rpi = &mbox->u.mqe.un.resume_rpi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+	bf_set(lpfc_resume_rpi_index, resume_rpi,
+	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+	bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
+	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
+}
+
+/**
+ * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+ *                        mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES supported pages mailbox command is issued to
+ * retrieve the particular feature pages supported by the port.
+ **/
+void
+lpfc_supported_pages(struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_supp_pages *supp_pages;
+
+	memset(mbox, 0, sizeof(*mbox));
+	supp_pages = &mbox->u.mqe.un.supp_pages;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+	bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+}
+
+/**
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+ * retrieve the particular SLI4 features supported by the port.
+ **/
+void
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_pc_sli4_params *sli4_params;
+
+	memset(mbox, 0, sizeof(*mbox));
+	sli4_params = &mbox->u.mqe.un.sli4_params;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+	bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_mem.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_mem.c
new file mode 100644
index 0000000..87c08ff
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_mem.c
@@ -0,0 +1,768 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+
+#define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
+#define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
+#define LPFC_DEVICE_DATA_POOL_SIZE 64   /* max elements in device data pool */
+
+int
+lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
+{
+	size_t bytes;
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (max_xri <= 0)
+		return -ENOMEM;
+	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
+		  sizeof(unsigned long);
+	phba->cfg_rrq_xri_bitmap_sz = bytes;
+	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							    bytes);
+	if (!phba->active_rrq_pool)
+		return -ENOMEM;
+	return 0;
+}
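+
+/*
+ * Editorial note: the bitmap sizing above is the usual round-up of max_xri
+ * bits to whole longs; with the kernel's helpers it could be written
+ * equivalently as:
+ *
+ *	bytes = BITS_TO_LONGS(max_xri) * sizeof(unsigned long);
+ */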
+
+/**
+ * lpfc_mem_alloc - create and allocate all PCI and memory pools
+ * @phba: HBA to allocate pools for
+ * @align: alignment, in bytes, for the DMA-pool buffers
+ *
+ * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
+ * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
+ * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
+ * allocation fails, frees all successfully allocated memory before returning.
+ *
+ * Returns:
+ *   0 on success
+ *   -ENOMEM on failure (if any memory allocations fail)
+ **/
+int
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
+{
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	int i;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Calculate alignment */
+		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+			i = phba->cfg_sg_dma_buf_size;
+		else
+			i = SLI4_PAGE_SIZE;
+
+		phba->lpfc_sg_dma_buf_pool =
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev,
+					phba->cfg_sg_dma_buf_size,
+					i, 0);
+		if (!phba->lpfc_sg_dma_buf_pool)
+			goto fail;
+
+	} else {
+		phba->lpfc_sg_dma_buf_pool =
+			dma_pool_create("lpfc_sg_dma_buf_pool",
+					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
+					align, 0);
+
+		if (!phba->lpfc_sg_dma_buf_pool)
+			goto fail;
+	}
+
+	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
+							LPFC_BPL_SIZE,
+							align, 0);
+	if (!phba->lpfc_mbuf_pool)
+		goto fail_free_dma_buf_pool;
+
+	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
+				       sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!pool->elements)
+		goto fail_free_lpfc_mbuf_pool;
+
+	pool->max_count = 0;
+	pool->current_count = 0;
+	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
+		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
+				       GFP_KERNEL, &pool->elements[i].phys);
+		if (!pool->elements[i].virt)
+			goto fail_free_mbuf_pool;
+		pool->max_count++;
+		pool->current_count++;
+	}
+
+	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							 sizeof(LPFC_MBOXQ_t));
+	if (!phba->mbox_mem_pool)
+		goto fail_free_mbuf_pool;
+
+	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_nodelist));
+	if (!phba->nlp_mem_pool)
+		goto fail_free_mbox_pool;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		phba->rrq_pool =
+			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_node_rrq));
+		if (!phba->rrq_pool)
+			goto fail_free_nlp_mem_pool;
+		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
+					      &phba->pcidev->dev,
+					      LPFC_HDR_BUF_SIZE, align, 0);
+		if (!phba->lpfc_hrb_pool)
+			goto fail_free_rrq_mem_pool;
+
+		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
+					      &phba->pcidev->dev,
+					      LPFC_DATA_BUF_SIZE, align, 0);
+		if (!phba->lpfc_drb_pool)
+			goto fail_free_hrb_pool;
+		phba->lpfc_hbq_pool = NULL;
+	} else {
+		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
+			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
+		if (!phba->lpfc_hbq_pool)
+			goto fail_free_nlp_mem_pool;
+		phba->lpfc_hrb_pool = NULL;
+		phba->lpfc_drb_pool = NULL;
+	}
+
+	if (phba->cfg_EnableXLane) {
+		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
+					LPFC_DEVICE_DATA_POOL_SIZE,
+					sizeof(struct lpfc_device_data));
+		if (!phba->device_data_mem_pool)
+			goto fail_free_drb_pool;
+	} else {
+		phba->device_data_mem_pool = NULL;
+	}
+
+	return 0;
+fail_free_drb_pool:
+	dma_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
+fail_free_hrb_pool:
+	dma_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
+fail_free_rrq_mem_pool:
+	mempool_destroy(phba->rrq_pool);
+	phba->rrq_pool = NULL;
+fail_free_nlp_mem_pool:
+	mempool_destroy(phba->nlp_mem_pool);
+	phba->nlp_mem_pool = NULL;
+fail_free_mbox_pool:
+	mempool_destroy(phba->mbox_mem_pool);
+	phba->mbox_mem_pool = NULL;
+fail_free_mbuf_pool:
+	while (i--)
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+			      pool->elements[i].phys);
+	kfree(pool->elements);
+fail_free_lpfc_mbuf_pool:
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
+	phba->lpfc_mbuf_pool = NULL;
+fail_free_dma_buf_pool:
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
+fail:
+	return -ENOMEM;
+}
+
+int
+lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
+{
+	phba->lpfc_nvmet_drb_pool =
+		dma_pool_create("lpfc_nvmet_drb_pool",
+				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
+				SGL_ALIGN_SZ, 0);
+	if (!phba->lpfc_nvmet_drb_pool) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6024 Can't enable NVME Target - no memory\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees the memory allocated by the lpfc_mem_alloc routine. This
+ * routine is the counterpart of lpfc_mem_alloc.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free(struct lpfc_hba *phba)
+{
+	int i;
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	struct lpfc_device_data *device_data;
+
+	/* Free HBQ pools */
+	lpfc_sli_hbqbuf_free_all(phba);
+	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
+	phba->lpfc_nvmet_drb_pool = NULL;
+	dma_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
+	dma_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
+	dma_pool_destroy(phba->txrdy_payload_pool);
+	phba->txrdy_payload_pool = NULL;
+
+	dma_pool_destroy(phba->lpfc_hbq_pool);
+	phba->lpfc_hbq_pool = NULL;
+
+	mempool_destroy(phba->rrq_pool);
+	phba->rrq_pool = NULL;
+
+	/* Free NLP memory pool */
+	mempool_destroy(phba->nlp_mem_pool);
+	phba->nlp_mem_pool = NULL;
+	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
+		mempool_destroy(phba->active_rrq_pool);
+		phba->active_rrq_pool = NULL;
+	}
+
+	/* Free mbox memory pool */
+	mempool_destroy(phba->mbox_mem_pool);
+	phba->mbox_mem_pool = NULL;
+
+	/* Free MBUF memory pool */
+	for (i = 0; i < pool->current_count; i++)
+		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+			      pool->elements[i].phys);
+	kfree(pool->elements);
+
+	dma_pool_destroy(phba->lpfc_mbuf_pool);
+	phba->lpfc_mbuf_pool = NULL;
+
+	/* Free DMA buffer memory pool */
+	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+	phba->lpfc_sg_dma_buf_pool = NULL;
+
+	/* Free Device Data memory pool */
+	if (phba->device_data_mem_pool) {
+		/* Ensure all objects have been returned to the pool */
+		while (!list_empty(&phba->luns)) {
+			device_data = list_first_entry(&phba->luns,
+						       struct lpfc_device_data,
+						       listentry);
+			list_del(&device_data->listentry);
+			mempool_free(device_data, phba->device_data_mem_pool);
+		}
+		mempool_destroy(phba->device_data_mem_pool);
+	}
+	phba->device_data_mem_pool = NULL;
+	return;
+}
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees all PCI and driver memory pools, including
+ * lpfc_sg_dma_buf_pool, lpfc_mbuf_pool and lpfc_hrb_pool. Frees the
+ * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist, and also
+ * frees the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mbox, *next_mbox;
+	struct lpfc_dmabuf   *mp;
+
+	/* Free memory used in mailbox queue back to mailbox memory pool */
+	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
+		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		list_del(&mbox->list);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
+	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
+		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		list_del(&mbox->list);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+	/* Free the active mailbox command back to the mailbox memory pool */
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	if (psli->mbox_active) {
+		mbox = psli->mbox_active;
+		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(mbox, phba->mbox_mem_pool);
+		psli->mbox_active = NULL;
+	}
+
+	/* Free and destroy all the allocated memory pools */
+	lpfc_mem_free(phba);
+
+	/* Free the iocb lookup array */
+	kfree(psli->iocbq_lookup);
+	psli->iocbq_lookup = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the mbuf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
+ * It tries the generic dma_pool_alloc function first; if that fails and
+ * mem_flags has MEM_PRI set (the only defined flag), it returns an mbuf
+ * from the HBA's safety pool.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.  Takes
+ * phba->hbalock.
+ *
+ * Returns:
+ *   pointer to the allocated mbuf on success
+ *   NULL on failure
+ **/
+void *
+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	unsigned long iflags;
+	void *ret;
+
+	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
+		pool->current_count--;
+		ret = pool->elements[pool->current_count].virt;
+		*handle = pool->elements[pool->current_count].phys;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
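+
+/*
+ * Illustrative usage sketch (editorial, not part of the driver): callers
+ * typically pair this with a struct lpfc_dmabuf container and the matching
+ * free routine:
+ *
+ *	struct lpfc_dmabuf *mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ *
+ *	if (mp)
+ *		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
+ *	...
+ *	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ *	kfree(mp);
+ */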
+
+/**
+ * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if that pool is
+ * below its max_count; otherwise frees the mbuf back to lpfc_mbuf_pool.
+ *
+ * Notes: Must be called with phba->hbalock held to synchronize access to
+ * lpfc_mbuf_safety_pool.
+ *
+ * Returns: None
+ **/
+void
+__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+	if (pool->current_count < pool->max_count) {
+		pool->elements[pool->current_count].virt = virt;
+		pool->elements[pool->current_count].phys = dma;
+		pool->current_count++;
+	} else {
+		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+	}
+	return;
+}
+
+/**
+ * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if that pool is
+ * below its max_count; otherwise frees the mbuf back to lpfc_mbuf_pool.
+ *
+ * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_mbuf_free(phba, virt, dma);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return;
+}
+
+/**
+ * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
+ * lpfc_sg_dma_buf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: unused here; kept for symmetry with lpfc_mbuf_alloc
+ * @handle: used to return the DMA-mapped address of the nvmet_buf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
+ * PCI pool.  Allocates from generic dma_pool_alloc function.
+ *
+ * Returns:
+ *   pointer to the allocated nvmet_buf on success
+ *   NULL on failure
+ **/
+void *
+lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+	void *ret;
+
+	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+	return ret;
+}
+
+/**
+ * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
+ * PCI pool
+ * @phba: HBA which owns the pool to return to
+ * @virt: nvmet_buf to free
+ * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
+ *
+ * Returns: None
+ **/
+void
+lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+}
+
+/**
+ * lpfc_els_hbq_alloc - Allocate an HBQ buffer
+ * @phba: HBA to allocate HBQ buffer for
+ *
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to HBQ on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_els_hbq_alloc(struct lpfc_hba *phba)
+{
+	struct hbq_dmabuf *hbqbp;
+
+	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	if (!hbqbp)
+		return NULL;
+
+	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+					  &hbqbp->dbuf.phys);
+	if (!hbqbp->dbuf.virt) {
+		kfree(hbqbp);
+		return NULL;
+	}
+	hbqbp->total_size = LPFC_BPL_SIZE;
+	return hbqbp;
+}
+
+/**
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * @phba: HBA buffer was allocated for
+ * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffer returned by
+ * lpfc_els_hbq_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
+{
+	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	kfree(hbqbp);
+	return;
+}
+
+/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates DMA-mapped receive buffers from the lpfc_hrb_pool and
+ * lpfc_drb_pool PCI pools along with a non-DMA-mapped container for them.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to HBQ on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+	struct hbq_dmabuf *dma_buf;
+
+	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	if (!dma_buf)
+		return NULL;
+
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+					    &dma_buf->hbuf.phys);
+	if (!dma_buf->hbuf.virt) {
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+					    &dma_buf->dbuf.phys);
+	if (!dma_buf->dbuf.virt) {
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
+	return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	kfree(dmab);
+}
+
+/**
+ * lpfc_sli4_nvmet_alloc - Allocate an SLI4 NVMET receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates DMA-mapped receive buffers from the lpfc_hrb_pool and
+ * lpfc_nvmet_drb_pool PCI pools along with a non-DMA-mapped container for
+ * them.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to the receive buffer on success
+ *   NULL on failure
+ **/
+struct rqb_dmabuf *
+lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
+{
+	struct rqb_dmabuf *dma_buf;
+
+	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+	if (!dma_buf)
+		return NULL;
+
+	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+					    &dma_buf->hbuf.phys);
+	if (!dma_buf->hbuf.virt) {
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
+					    GFP_KERNEL, &dma_buf->dbuf.phys);
+	if (!dma_buf->dbuf.virt) {
+		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
+	return dma_buf;
+}
+
+/**
+ * lpfc_sli4_nvmet_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_nvmet_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
+{
+	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	dma_pool_free(phba->lpfc_nvmet_drb_pool,
+		      dmab->dbuf.virt, dmab->dbuf.phys);
+	kfree(dmab);
+}
+
+/**
+ * lpfc_in_buf_free - Free a DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer in the appropriate way, depending on
+ * whether the HBA is running in SLI3 mode with HBQs enabled.
+ *
+ * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+	struct hbq_dmabuf *hbq_entry;
+	unsigned long flags;
+
+	if (!mp)
+		return;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
+		/* Check whether HBQ is still in use */
+		spin_lock_irqsave(&phba->hbalock, flags);
+		if (!phba->hbq_in_use) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return;
+		}
+		list_del(&hbq_entry->dbuf.list);
+		if (hbq_entry->tag == -1) {
+			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+				(phba, hbq_entry);
+		} else {
+			lpfc_sli_free_hbq(phba, hbq_entry);
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+	} else {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+	return;
+}
+
+/**
+ * lpfc_rq_buf_free - Free a RQ DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given RQ DMA buffer by reposting it to its
+ * associated RQ so that it can be reused.
+ *
+ * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+	struct lpfc_rqb *rqbp;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct rqb_dmabuf *rqb_entry;
+	unsigned long flags;
+	int rc;
+
+	if (!mp)
+		return;
+
+	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
+	rqbp = rqb_entry->hrq->rqbp;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_del(&rqb_entry->hbuf.list);
+	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
+	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
+	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
+	if (rc < 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6409 Cannot post to RQ %d: %x %x\n",
+				rqb_entry->hrq->queue_id,
+				rqb_entry->hrq->host_index,
+				rqb_entry->hrq->hba_index);
+		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+	} else {
+		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
+		rqbp->buffer_count++;
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nl.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nl.h
new file mode 100644
index 0000000..b93e78f
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nl.h
@@ -0,0 +1,181 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2010 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Event definitions for RegisterForEvent */
+#define FC_REG_LINK_EVENT		0x0001	/* link up / down events */
+#define FC_REG_RSCN_EVENT		0x0002	/* RSCN events */
+#define FC_REG_CT_EVENT			0x0004	/* CT request events */
+#define FC_REG_DUMP_EVENT		0x0010	/* Dump events */
+#define FC_REG_TEMPERATURE_EVENT	0x0020	/* temperature events */
+#define FC_REG_VPORTRSCN_EVENT		0x0040	/* Vport RSCN events */
+#define FC_REG_ELS_EVENT		0x0080	/* lpfc els events */
+#define FC_REG_FABRIC_EVENT		0x0100	/* lpfc fabric events */
+#define FC_REG_SCSI_EVENT		0x0200	/* lpfc scsi events */
+#define FC_REG_BOARD_EVENT		0x0400	/* lpfc board events */
+#define FC_REG_ADAPTER_EVENT		0x0800	/* lpfc adapter events */
+#define FC_REG_EVENT_MASK		(FC_REG_LINK_EVENT | \
+						FC_REG_RSCN_EVENT | \
+						FC_REG_CT_EVENT | \
+						FC_REG_DUMP_EVENT | \
+						FC_REG_TEMPERATURE_EVENT | \
+						FC_REG_VPORTRSCN_EVENT | \
+						FC_REG_ELS_EVENT | \
+						FC_REG_FABRIC_EVENT | \
+						FC_REG_SCSI_EVENT | \
+						FC_REG_BOARD_EVENT | \
+						FC_REG_ADAPTER_EVENT)
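+
+/*
+ * Illustrative check (editorial, not part of this header): an event
+ * registration request would be validated against the mask, e.g.
+ *
+ *	if (requested_events & ~FC_REG_EVENT_MASK)
+ *		return -EINVAL;		(unknown event bits requested)
+ *
+ * where requested_events is a hypothetical caller-supplied bitmap.
+ */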
+/* Temperature events */
+#define LPFC_CRIT_TEMP		0x1
+#define LPFC_THRESHOLD_TEMP	0x2
+#define LPFC_NORMAL_TEMP	0x3
+/*
+ * All net link event payloads will begin with an event type
+ * and subcategory. The event type must come first.
+ * The subcategory further defines the data that follows in the rest
+ * of the payload. Each category will have its own unique header plus
+ * any additional data unique to the subcategory.
+ * The payload sent via the fc transport is one-way driver->application.
+ */
+
+/* RSCN event header */
+struct lpfc_rscn_event_header {
+	uint32_t event_type;
+	uint32_t payload_length; /* RSCN data length in bytes */
+	uint32_t rscn_payload[];
+};
+
+/* els event header */
+struct lpfc_els_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_ELS_EVENT */
+#define LPFC_EVENT_PLOGI_RCV		0x01
+#define LPFC_EVENT_PRLO_RCV		0x02
+#define LPFC_EVENT_ADISC_RCV		0x04
+#define LPFC_EVENT_LSRJT_RCV		0x08
+#define LPFC_EVENT_LOGO_RCV		0x10
+
+/* special els lsrjt event */
+struct lpfc_lsrjt_event {
+	struct lpfc_els_event_header header;
+	uint32_t command;
+	uint32_t reason_code;
+	uint32_t explanation;
+};
+
+/* special els logo event */
+struct lpfc_logo_event {
+	struct lpfc_els_event_header header;
+	uint8_t logo_wwpn[8];
+};
+
+/* fabric event header */
+struct lpfc_fabric_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_FABRIC_EVENT */
+#define LPFC_EVENT_FABRIC_BUSY		0x01
+#define LPFC_EVENT_PORT_BUSY		0x02
+#define LPFC_EVENT_FCPRDCHKERR		0x04
+
+/* special case fabric fcprdchkerr event */
+struct lpfc_fcprdchkerr_event {
+	struct lpfc_fabric_event_header header;
+	uint32_t lun;
+	uint32_t opcode;
+	uint32_t fcpiparam;
+};
+
+
+/* scsi event header */
+struct lpfc_scsi_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+	uint32_t lun;
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_SCSI_EVENT */
+#define LPFC_EVENT_QFULL	0x0001
+#define LPFC_EVENT_DEVBSY	0x0002
+#define LPFC_EVENT_CHECK_COND	0x0004
+#define LPFC_EVENT_LUNRESET	0x0008
+#define LPFC_EVENT_TGTRESET	0x0010
+#define LPFC_EVENT_BUSRESET	0x0020
+#define LPFC_EVENT_VARQUEDEPTH	0x0040
+
+/* special case scsi varqueuedepth event */
+struct lpfc_scsi_varqueuedepth_event {
+	struct lpfc_scsi_event_header scsi_event;
+	uint32_t oldval;
+	uint32_t newval;
+};
+
+/* special case scsi check condition event */
+struct lpfc_scsi_check_condition_event {
+	struct lpfc_scsi_event_header scsi_event;
+	uint8_t opcode;
+	uint8_t sense_key;
+	uint8_t asc;
+	uint8_t ascq;
+};
+
+/* event codes for FC_REG_BOARD_EVENT */
+#define LPFC_EVENT_PORTINTERR		0x01
+
+/* board event header */
+struct lpfc_board_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+};
+
+
+/* event codes for FC_REG_ADAPTER_EVENT */
+#define LPFC_EVENT_ARRIVAL	0x01
+
+/* adapter event header */
+struct lpfc_adapter_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+};
+
+
+/* event codes for temp_event */
+#define LPFC_CRIT_TEMP		0x1
+#define LPFC_THRESHOLD_TEMP	0x2
+#define LPFC_NORMAL_TEMP	0x3
+
+struct temp_event {
+	uint32_t event_type;
+	uint32_t event_code;
+	uint32_t data;
+};
+
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nportdisc.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nportdisc.c
new file mode 100644
index 0000000..9641175
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -0,0 +1,2866 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+
+/* Called to verify a rcv'ed ADISC was intended for us. */
+static int
+lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 struct lpfc_name *nn, struct lpfc_name *pn)
+{
+	/* First, we MUST have an RPI registered */
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+		return 0;
+
+	/* Check that the ADISC rsp WWNN / WWPN matches our internal node
+	 * table entry for that node.
+	 */
+	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
+		return 0;
+
+	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
+		return 0;
+
+	/* we match, return success */
+	return 1;
+}
+
+int
+lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 struct serv_parm *sp, uint32_t class, int flogi)
+{
+	volatile struct serv_parm *hsp = &vport->fc_sparam;
+	uint16_t hsp_value, ssp_value = 0;
+
+	/*
+	 * The receive data field size and buffer-to-buffer receive data field
+	 * size entries are 16 bits but are represented as two 8-bit fields in
+	 * the driver data structure to account for rsvd bits and other control
+	 * bits.  Reconstruct and compare the fields as 16-bit values before
+	 * correcting the byte values.
+	 */
+	if (sp->cls1.classValid) {
+		if (!flogi) {
+			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
+				     hsp->cls1.rcvDataSizeLsb);
+			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
+				     sp->cls1.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls1.rcvDataSizeLsb =
+					hsp->cls1.rcvDataSizeLsb;
+				sp->cls1.rcvDataSizeMsb =
+					hsp->cls1.rcvDataSizeMsb;
+			}
+		}
+	} else if (class == CLASS1)
+		goto bad_service_param;
+	if (sp->cls2.classValid) {
+		if (!flogi) {
+			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
+				     hsp->cls2.rcvDataSizeLsb);
+			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
+				     sp->cls2.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls2.rcvDataSizeLsb =
+					hsp->cls2.rcvDataSizeLsb;
+				sp->cls2.rcvDataSizeMsb =
+					hsp->cls2.rcvDataSizeMsb;
+			}
+		}
+	} else if (class == CLASS2)
+		goto bad_service_param;
+	if (sp->cls3.classValid) {
+		if (!flogi) {
+			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
+				     hsp->cls3.rcvDataSizeLsb);
+			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
+				     sp->cls3.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls3.rcvDataSizeLsb =
+					hsp->cls3.rcvDataSizeLsb;
+				sp->cls3.rcvDataSizeMsb =
+					hsp->cls3.rcvDataSizeMsb;
+			}
+		}
+	} else if (class == CLASS3)
+		goto bad_service_param;
+
+	/*
+	 * Preserve the upper four bits of the MSB from the PLOGI response.
+	 * These bits contain the Buffer-to-Buffer State Change Number
+	 * from the target and need to be passed to the FW.
+	 */
+	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
+	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
+	if (ssp_value > hsp_value) {
+		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
+		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
+				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
+	}
+
+	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
+	return 1;
+bad_service_param:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0207 Device %x "
+			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
+			 "invalid service parameters.  Ignoring device.\n",
+			 ndlp->nlp_DID,
+			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
+			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
+			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
+			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
+	return 0;
+}
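+
+/*
+ * Editorial worked example: the MSB/LSB handling above is plain 16-bit
+ * assembly and disassembly. For a receive data field size of 2048 (0x0800):
+ *
+ *	value = (msb << 8) | lsb;	(0x08 << 8 | 0x00 == 2048)
+ *	msb = (value >> 8) & 0xff;
+ *	lsb = value & 0xff;
+ */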
+
+static void *
+lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_dmabuf *pcmd, *prsp;
+	uint32_t *lp;
+	void     *ptr = NULL;
+	IOCB_t   *irsp;
+
+	irsp = &rspiocb->iocb;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	/* For lpfc_els_abort, context2 could be zero'ed to delay
+	 * freeing associated memory till after ABTS completes.
+	 */
+	if (pcmd) {
+		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf,
+				      list);
+		if (prsp) {
+			lp = (uint32_t *) prsp->virt;
+			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
+		}
+	} else {
+		/* Force ulpStatus error since we are returning NULL ptr */
+		if (!(irsp->ulpStatus)) {
+			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
+			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+		}
+		ptr = NULL;
+	}
+	return ptr;
+}
+
+
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with an LPFC_NODELIST entry. This
+ * routine effectively results in a "software abort".
+ */
+void
+lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(abort_list);
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	pring = lpfc_phba_elsring(phba);
+
+	/* In case of error recovery path, we might have a NULL pring here */
+	if (unlikely(!pring))
+		return;
+
+	/* Abort outstanding I/O on NPort <nlp_DID> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
+			 "2819 Abort outstanding I/O on NPort x%x "
+			 "Data: x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	/* Clean up all fabric IOs first.*/
+	lpfc_fabric_abort_nport(ndlp);
+
+	/*
+	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
+	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
+	 * txcmplq so that the abort operation completes them successfully.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		/* Add to abort_list on NDLP match. */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+			list_add_tail(&iocb->dlist, &abort_list);
+	}
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Abort the targeted IOs and remove them from the abort list. */
+	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&iocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	INIT_LIST_HEAD(&abort_list);
+
+	/* Now process the txq */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_lock(&pring->ring_lock);
+
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		/* Check to see if iocb matches the nport we are looking for */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+			list_del_init(&iocb->list);
+			list_add_tail(&iocb->list, &abort_list);
+		}
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &abort_list,
+			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+
+	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+}
+
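+/*
+ * Handle a received PLOGI.  Validate the service parameters, do an
+ * implicit logout if the node is already logged in, and queue a
+ * REG_LOGIN mailbox command to run once the PLOGI ACC completes.
+ * Returns 1 if the PLOGI was answered, 0 if it was rejected outright.
+ */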
+static int
+lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	       struct lpfc_iocbq *cmdiocb)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba    *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint64_t nlp_portwwn = 0;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	struct serv_parm *sp;
+	uint32_t ed_tov;
+	LPFC_MBOXQ_t *mbox;
+	struct ls_rjt stat;
+	uint32_t vid, flag;
+	int rc;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0140 PLOGI Reject: invalid pname\n");
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 0;
+	}
+	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0141 PLOGI Reject: invalid nname\n");
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 0;
+	}
+
+	nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+	if (lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0) {
+		/* Reject this request because of invalid parameters */
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 0;
+	}
+	icmd = &cmdiocb->iocb;
+
+	/* PLOGI chkparm OK */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+			 "x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+			 ndlp->nlp_rpi, vport->port_state,
+			 vport->fc_flag);
+
+	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
+		ndlp->nlp_fcp_info |= CLASS2;
+	else
+		ndlp->nlp_fcp_info |= CLASS3;
+
+	ndlp->nlp_class_sup = 0;
+	if (sp->cls1.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+	if (sp->cls2.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS2;
+	if (sp->cls3.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS3;
+	if (sp->cls4.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS4;
+	ndlp->nlp_maxframe =
+		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+	/* if already logged in, do implicit logout */
+	switch (ndlp->nlp_state) {
+	case  NLP_STE_NPR_NODE:
+		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+			break;
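+		/* Fallthrough - an ADISC is pending, treat like logged in */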
+	case  NLP_STE_REG_LOGIN_ISSUE:
+	case  NLP_STE_PRLI_ISSUE:
+	case  NLP_STE_UNMAPPED_NODE:
+	case  NLP_STE_MAPPED_NODE:
+		/* For initiators, lpfc_plogi_confirm_nport skips fabric did.
+		 * For target mode, execute implicit logo.
+		 * Fabric nodes go into NPR.
+		 */
+		if (!(ndlp->nlp_type & NLP_FABRIC) &&
+		    !(phba->nvmet_support)) {
+			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+					 ndlp, NULL);
+			return 1;
+		}
+		if (nlp_portwwn != 0 &&
+		    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0143 PLOGI recv'd from DID: x%x "
+					 "WWPN changed: old %llx new %llx\n",
+					 ndlp->nlp_DID,
+					 (unsigned long long)nlp_portwwn,
+					 (unsigned long long)
+					 wwn_to_u64(sp->portName.u.wwn));
+
+		ndlp->nlp_prev_state = ndlp->nlp_state;
+		/* rport needs to be unregistered first */
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		break;
+	}
+
+	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
+	/* Check for Nport to NPort pt2pt protocol */
+	if ((vport->fc_flag & FC_PT2PT) &&
+	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+		/* rcv'ed PLOGI decides what our NPortId will be */
+		vport->fc_myDID = icmd->un.rcvels.parmRo;
+
+		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+		if (sp->cmn.edtovResolution) {
+			/* E_D_TOV ticks are in nanoseconds; convert to ms */
+			ed_tov = (ed_tov + 999999) / 1000000;
+		}
+
+		/*
+		 * For pt-to-pt, use the larger EDTOV
+		 * RATOV = 2 * EDTOV
+		 */
+		if (ed_tov > phba->fc_edtov)
+			phba->fc_edtov = ed_tov;
+		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+		/* Issue config_link / reg_vfi to account for updated TOV's */
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			lpfc_issue_reg_vfi(vport);
+		} else {
+			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+			if (!mbox)
+				goto out;
+			lpfc_config_link(phba, mbox);
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+				goto out;
+			}
+		}
+
+		lpfc_can_disctmo(vport);
+	}
+
+	ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
+	if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+	    sp->cmn.valid_vendor_ver_level) {
+		vid = be32_to_cpu(sp->un.vv.vid);
+		flag = be32_to_cpu(sp->un.vv.flags);
+		if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
+			ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		goto out;
+
+	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_unreg_rpi(vport, ndlp);
+
+	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
+	if (rc) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+
+	/* ACC PLOGI rsp command needs to execute first,
+	 * queue this mbox command to be processed later.
+	 */
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+	/*
+	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
+	 * command issued in lpfc_cmpl_els_acc().
+	 */
+	mbox->vport = vport;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+	spin_unlock_irq(shost->host_lock);
+
+	/*
+	 * If there is an outstanding PLOGI issued, abort it before
+	 * sending ACC rsp for received PLOGI. If pending plogi
+	 * is not canceled here, the plogi will be rejected by
+	 * remote port and will be retried. On a configuration with
+	 * single discovery thread, this will cause a huge delay in
+	 * discovery. Also this will cause multiple state machines
+	 * running in parallel for this node.
+	 * This only applies to a fabric environment.
+	 */
+	if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+	    (vport->fc_flag & FC_FABRIC)) {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(phba, ndlp);
+	}
+
+	if (vport->port_type == LPFC_NPIV_PORT &&
+	    vport->cfg_restrict_login) {
+
+		/* In order to preserve RPIs, we want to clean up
+		 * the default RPI the firmware created to rcv
+		 * this ELS request. The only way to do this is
+		 * to register, then unregister the RPI.
+		 */
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+		spin_unlock_irq(shost->host_lock);
+		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+			ndlp, mbox);
+		if (rc)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		return 1;
+	}
+	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+	if (rc)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return 1;
+out:
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object
+ *
+ * This routine is invoked to issue a completion to a rcv'ed
+ * ADISC or PDISC after the paused RPI has been resumed.
+ **/
+static void
+lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint32_t cmd;
+
+	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
+	ndlp = (struct lpfc_nodelist *) mboxq->context2;
+	vport = mboxq->vport;
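+	/* lpfc_rcv_padisc stashed the ELS cmd in the drvrTimeout field */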
+	cmd = elsiocb->drvrTimeout;
+
+	if (cmd == ELS_CMD_ADISC) {
+		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
+	} else {
+		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
+			ndlp, NULL);
+	}
+	kfree(elsiocb);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
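+/*
+ * Handle a received ADISC or PDISC.  If the names in the payload match
+ * those saved at PLOGI time, ACC the request (resuming the RPI first
+ * on SLI4); otherwise reject it and schedule a delayed PLOGI to
+ * restart authentication.
+ */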
+static int
+lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		struct lpfc_iocbq *cmdiocb)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq  *elsiocb;
+	struct lpfc_dmabuf *pcmd;
+	struct serv_parm   *sp;
+	struct lpfc_name   *pnn, *ppn;
+	struct ls_rjt stat;
+	ADISC *ap;
+	IOCB_t *icmd;
+	uint32_t *lp;
+	uint32_t cmd;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	if (cmd == ELS_CMD_ADISC) {
+		ap = (ADISC *) lp;
+		pnn = (struct lpfc_name *) & ap->nodeName;
+		ppn = (struct lpfc_name *) & ap->portName;
+	} else {
+		sp = (struct serv_parm *) lp;
+		pnn = (struct lpfc_name *) & sp->nodeName;
+		ppn = (struct lpfc_name *) & sp->portName;
+	}
+
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+
+		/*
+		 * As soon as we send ACC, the remote NPort can
+		 * start sending us data. Thus, for SLI4 we must
+		 * resume the RPI before the ACC goes out.
+		 */
+		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
+				GFP_KERNEL);
+			if (elsiocb) {
+
+				/* Save info from cmd IOCB used in rsp */
+				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
+					sizeof(struct lpfc_iocbq));
+
+				/* Save the ELS cmd */
+				elsiocb->drvrTimeout = cmd;
+
+				lpfc_sli4_resume_rpi(ndlp,
+					lpfc_mbx_cmpl_resume_rpi, elsiocb);
+				goto out;
+			}
+		}
+
+		if (cmd == ELS_CMD_ADISC) {
+			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
+		} else {
+			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+				ndlp, NULL);
+		}
+out:
+		/* If we are authenticated, move to the proper state */
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+		else
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+		return 1;
+	}
+	/* Reject this request because of invalid parameters */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+
+	/* 1 sec timeout */
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	return 0;
+}
+
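+/*
+ * Handle a received LOGO or PRLO.  ACC the request, then either
+ * re-instantiate the VLink via FDISC (fabric LOGO), schedule a
+ * delayed PLOGI retry (non-fabric nodes), or just drop the node to
+ * NPR state.
+ */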
+static int
+lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba    *phba = vport->phba;
+	struct lpfc_vport **vports;
+	int i, active_vlink_present = 0;
+
+	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
+	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
+	 * PLOGIs during LOGO storms from a device.
+	 */
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+	if (els_cmd == ELS_CMD_PRLO)
+		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	else
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if (ndlp->nlp_DID == Fabric_DID) {
+		if (vport->port_state <= LPFC_FDISC)
+			goto out;
+		lpfc_linkdown_port(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
+		spin_unlock_irq(shost->host_lock);
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				if ((!(vports[i]->fc_flag &
+					FC_VPORT_LOGO_RCVD)) &&
+					(vports[i]->port_state > LPFC_FDISC)) {
+					active_vlink_present = 1;
+					break;
+				}
+			}
+			lpfc_destroy_vport_work_array(phba, vports);
+		}
+
+		/*
+		 * Don't re-instantiate if vport is marked for deletion.
+		 * If we are here first then vport_delete is going to wait
+		 * for discovery to complete.
+		 */
+		if (!(vport->load_flag & FC_UNLOADING) &&
+					active_vlink_present) {
+			/*
+			 * If there are other active VLinks present,
+			 * re-instantiate the Vlink using FDISC.
+			 */
+			mod_timer(&ndlp->nlp_delayfunc,
+				  jiffies + msecs_to_jiffies(1000));
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+			vport->port_state = LPFC_FDISC;
+		} else {
+			spin_lock_irq(shost->host_lock);
+			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_retry_pport_discovery(phba);
+		}
+	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+		((ndlp->nlp_type & NLP_FCP_TARGET) ||
+		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+		/* Only try to re-login if this is NOT a Fabric Node */
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000 * 1));
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
+
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	}
+out:
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	spin_unlock_irq(shost->host_lock);
+	/* The driver has to wait until the ACC completes before it continues
+	 * processing the LOGO.  The action will resume in
+	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+	 * unreg_login, the driver waits so the ACC does not get aborted.
+	 */
+	return 0;
+}
+
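+/*
+ * Parse a received PRLI and record the remote port's FCP/NVME
+ * initiator and target roles on the ndlp, updating the SCSI transport
+ * rport roles to match.
+ */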
+static void
+lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	PRLI *npr;
+	struct fc_rport *rport = ndlp->rport;
+	u32 roles;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+
+	if ((npr->prliType == PRLI_FCP_TYPE) ||
+	    (npr->prliType == PRLI_NVME_TYPE)) {
+		if (npr->initiatorFunc) {
+			if (npr->prliType == PRLI_FCP_TYPE)
+				ndlp->nlp_type |= NLP_FCP_INITIATOR;
+			if (npr->prliType == PRLI_NVME_TYPE)
+				ndlp->nlp_type |= NLP_NVME_INITIATOR;
+		}
+		if (npr->targetFunc) {
+			if (npr->prliType == PRLI_FCP_TYPE)
+				ndlp->nlp_type |= NLP_FCP_TARGET;
+			if (npr->prliType == PRLI_NVME_TYPE)
+				ndlp->nlp_type |= NLP_NVME_TARGET;
+			if (npr->writeXferRdyDis)
+				ndlp->nlp_flag |= NLP_FIRSTBURST;
+		}
+		if (npr->Retry)
+			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+		/* If this driver is in nvme target mode, set the ndlp's fc4
+		 * type to NVME provided the PRLI claims the NVME FC4
+		 * type.  Target mode does not issue gft_id so doesn't get
+		 * the fc4 type set until now.
+		 */
+		if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
+			ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		}
+		if (npr->prliType == PRLI_FCP_TYPE)
+			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+	}
+	if (rport) {
+		/* We need to update the rport role values */
+		roles = FC_RPORT_ROLE_UNKNOWN;
+		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+			"rport rolechg:   role:x%x did:x%x flg:x%x",
+			roles, ndlp->nlp_DID, ndlp->nlp_flag);
+
+		if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+			fc_remote_port_rolechg(rport, roles);
+	}
+}
+
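+/*
+ * Decide whether this node can be authenticated with ADISC on the next
+ * discovery cycle.  Returns 1 and sets NLP_NPR_ADISC when ADISC is
+ * usable; otherwise clears the flag, unregisters the RPI and returns 0.
+ */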
+static uint32_t
+lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+
+	if (!(vport->fc_flag & FC_PT2PT)) {
+		/* Check config parameter use-adisc or FCP-2 */
+		if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
+		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
+		     (ndlp->nlp_type & NLP_FCP_TARGET)))) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			return 1;
+		}
+	}
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_unreg_rpi(vport, ndlp);
+	return 0;
+}
+
+/**
+ * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
+ * @phba : Pointer to lpfc_hba structure.
+ * @vport: Pointer to lpfc_vport structure.
+ * @rpi  : rpi to be released.
+ *
+ * This function sends an unreg_login mailbox command to the firmware
+ * to release an rpi.
+ **/
+void
+lpfc_release_rpi(struct lpfc_hba *phba,
+		struct lpfc_vport *vport,
+		uint16_t rpi)
+{
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+			GFP_KERNEL);
+	if (!pmb)
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			"2796 mailbox memory allocation failed\n");
+	else {
+		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(pmb, phba->mbox_mem_pool);
+	}
+}
+
+static uint32_t
+lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		  void *arg, uint32_t evt)
+{
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	uint16_t rpi;
+
+	phba = vport->phba;
+	/* Release the RPI if reglogin completing */
+	if (!(phba->pport->load_flag & FC_UNLOADING) &&
+		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
+		(!pmb->u.mb.mbxStatus)) {
+		rpi = pmb->u.mb.un.varWords[0];
+		lpfc_release_rpi(phba, vport, rpi);
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0271 Illegal State Transition: node x%x "
+			 "event x%x, state x%x Data: x%x x%x\n",
+			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+			 ndlp->nlp_flag);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		  void *arg, uint32_t evt)
+{
+	/* This transition is only legal if we previously
+	 * rcv'ed a PLOGI.  Since we don't want two discovery threads
+	 * working on the same NPortID, do nothing here so this
+	 * thread stops.
+	 */
+	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0272 Illegal State Transition: node x%x "
+			 "event x%x, state x%x Data: x%x x%x\n",
+			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+			 ndlp->nlp_flag);
+	}
+	return ndlp->nlp_state;
+}
+
+/* Start of Discovery State Machine routines */
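+/*
+ * Each handler below returns the node's next state; a return of
+ * NLP_STE_FREED_NODE means the ndlp has been freed and must not be
+ * referenced again.
+ */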
+
+static uint32_t
+lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
+		return ndlp->nlp_state;
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	lpfc_issue_els_logo(vport, ndlp, 0);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = arg;
+	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	uint32_t *lp = (uint32_t *) pcmd->virt;
+	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
+	struct ls_rjt stat;
+	int port_cmp;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+
+	/* For a PLOGI, we only accept if our portname is less
+	 * than the remote portname.
+	 */
+	phba->fc_stat.elsLogiCol++;
+	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
+			  sizeof(struct lpfc_name));
+
+	if (port_cmp >= 0) {
+		/* Reject this request because the remote node will
+		 * accept ours.
+		 */
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+	} else {
+		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+		    (vport->num_disc_nodes)) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			/* Check if there are more PLOGIs to be sent */
+			lpfc_more_plogi(vport);
+			if (vport->num_disc_nodes == 0) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
+	} /* If our portname was less */
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(vport->phba, ndlp);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(phba, ndlp);
+
+	if (evt == NLP_EVT_RCV_LOGO) {
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	} else {
+		lpfc_issue_els_logo(vport, ndlp, 0);
+	}
+
+	/* Put ndlp in npr state set plogi timer for 1 sec */
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+	return ndlp->nlp_state;
+}
+
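+/*
+ * Completion handler for a solicited PLOGI.  On success, validate the
+ * service parameters and issue REG_LOGIN; on any failure, park the
+ * node in NPR and mark it NLP_DEFER_RM so stale events are ignored.
+ */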
+static uint32_t
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg,
+			    uint32_t evt)
+{
+	struct lpfc_hba    *phba = vport->phba;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq  *cmdiocb, *rspiocb;
+	struct lpfc_dmabuf *pcmd, *prsp, *mp;
+	uint32_t *lp;
+	uint32_t vid, flag;
+	IOCB_t *irsp;
+	struct serv_parm *sp;
+	uint32_t ed_tov;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+		/* Recovery from PLOGI collision logic */
+		return ndlp->nlp_state;
+	}
+
+	irsp = &rspiocb->iocb;
+
+	if (irsp->ulpStatus)
+		goto out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
+
+	lp = (uint32_t *) prsp->virt;
+	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+
+	/* Some switches have FDMI servers returning 0 for WWN */
+	if ((ndlp->nlp_DID != FDMI_DID) &&
+		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
+		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0142 PLOGI RSP: Invalid WWN.\n");
+		goto out;
+	}
+	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
+		goto out;
+	/* PLOGI chkparm OK */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_state,
+			 ndlp->nlp_flag, ndlp->nlp_rpi);
+	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
+		ndlp->nlp_fcp_info |= CLASS2;
+	else
+		ndlp->nlp_fcp_info |= CLASS3;
+
+	ndlp->nlp_class_sup = 0;
+	if (sp->cls1.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+	if (sp->cls2.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS2;
+	if (sp->cls3.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS3;
+	if (sp->cls4.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS4;
+	ndlp->nlp_maxframe =
+		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+	if ((vport->fc_flag & FC_PT2PT) &&
+	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
+		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+		if (sp->cmn.edtovResolution) {
+			/* E_D_TOV ticks are in nanoseconds; convert to ms */
+			ed_tov = (ed_tov + 999999) / 1000000;
+		}
+
+		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
+		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+		    sp->cmn.valid_vendor_ver_level) {
+			vid = be32_to_cpu(sp->un.vv.vid);
+			flag = be32_to_cpu(sp->un.vv.flags);
+			if ((vid == LPFC_VV_EMLX_ID) &&
+			    (flag & LPFC_VV_SUPPRESS_RSP))
+				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
+		}
+
+		/*
+		 * Use the larger EDTOV
+		 * RATOV = 2 * EDTOV for pt-to-pt
+		 */
+		if (ed_tov > phba->fc_edtov)
+			phba->fc_edtov = ed_tov;
+		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
+
+		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+		/* Issue config_link / reg_vfi to account for updated TOV's */
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			lpfc_issue_reg_vfi(vport);
+		} else {
+			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+			if (!mbox) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0133 PLOGI: no memory "
+						 "for config_link "
+						 "Data: x%x x%x x%x x%x\n",
+						 ndlp->nlp_DID, ndlp->nlp_state,
+						 ndlp->nlp_flag, ndlp->nlp_rpi);
+				goto out;
+			}
+
+			lpfc_config_link(phba, mbox);
+
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+				goto out;
+			}
+		}
+	}
+
+	lpfc_unreg_rpi(vport, ndlp);
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0018 PLOGI: no memory for reg_login "
+				 "Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_state,
+				 ndlp->nlp_flag, ndlp->nlp_rpi);
+		goto out;
+	}
+
+	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
+		switch (ndlp->nlp_DID) {
+		case NameServer_DID:
+			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
+			break;
+		case FDMI_DID:
+			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
+			break;
+		default:
+			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+		}
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->vport = vport;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+		    != MBX_NOT_FINISHED) {
+			lpfc_nlp_set_state(vport, ndlp,
+					   NLP_STE_REG_LOGIN_ISSUE);
+			return ndlp->nlp_state;
+		}
+		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+		/* decrement node reference count to the failed mbox
+		 * command
+		 */
+		lpfc_nlp_put(ndlp);
+		mp = (struct lpfc_dmabuf *) mbox->context1;
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(mbox, phba->mbox_mem_pool);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0134 PLOGI: cannot issue reg_login "
+				 "Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_state,
+				 ndlp->nlp_flag, ndlp->nlp_rpi);
+	} else {
+		mempool_free(mbox, phba->mbox_mem_pool);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0135 PLOGI: cannot format reg_login "
+				 "Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_state,
+				 ndlp->nlp_flag, ndlp->nlp_rpi);
+	}
+
+out:
+	if (ndlp->nlp_DID == NameServer_DID) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0261 Cannot Register NameServer login\n");
+	}
+
+	/*
+	 * In case the node reference counter does not go to zero, ensure
+	 * that the stale state for the node is not processed.
+	 */
+
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DEFER_RM;
+	spin_unlock_irq(shost->host_lock);
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
+	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
+{
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint16_t rpi;
+
+	phba = vport->phba;
+	/* Release the RPI */
+	if (!(phba->pport->load_flag & FC_UNLOADING) &&
+		!mb->mbxStatus) {
+		rpi = pmb->u.mb.un.varWords[0];
+		lpfc_release_rpi(phba, vport, rpi);
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(vport->phba, ndlp);
+
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+static uint32_t
+lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(phba, ndlp);
+
+	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb;
+
+	/* software abort outstanding ADISC */
+	lpfc_els_abort(phba, ndlp);
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			if (vport->num_disc_nodes)
+				lpfc_more_adisc(vport);
+		}
+		return ndlp->nlp_state;
+	}
+	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* software abort outstanding ADISC */
+	lpfc_els_abort(phba, ndlp);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* Treat like rcv logo */
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+	ADISC *ap;
+	int rc;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+	irsp = &rspiocb->iocb;
+
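+	/*
+	 * If lpfc_check_elscmpl_iocb returned NULL, it also forced
+	 * ulpStatus to an error, so the || below short-circuits before
+	 * ap is dereferenced.
+	 */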
+	if ((irsp->ulpStatus) ||
+	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
+		/* 1 sec timeout */
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000));
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+
+		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
+		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
+
+		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		lpfc_unreg_rpi(vport, ndlp);
+		return ndlp->nlp_state;
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
+		if (rc) {
+			/* Stay in state and retry. */
+			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+			return ndlp->nlp_state;
+		}
+	}
+
+	if (ndlp->nlp_type & NLP_FCP_TARGET) {
+		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+	} else {
+		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	}
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		/* software abort outstanding ADISC */
+		lpfc_els_abort(vport->phba, ndlp);
+
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+static uint32_t
+lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	/* software abort outstanding ADISC */
+	lpfc_els_abort(phba, ndlp);
+
+	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	struct ls_rjt     stat;
+
+	if (vport->phba->nvmet_support) {
+		/* NVME Target mode.  Handle and respond to the PRLI and
+		 * transition to UNMAPPED provided the RPI has completed
+		 * registration.
+		 */
+		if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+			lpfc_rcv_prli(vport, ndlp, cmdiocb);
+			lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+		} else {
+			/* RPI registration has not completed. Reject the PRLI
+			 * to prevent an illegal state transition when the
+			 * rpi registration does complete.
+			 */
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+					 "6115 NVMET ndlp rpi %d state "
+					 "unknown, state x%x flags x%08x\n",
+					 ndlp->nlp_rpi, ndlp->nlp_state,
+					 ndlp->nlp_flag);
+			memset(&stat, 0, sizeof(struct ls_rjt));
+			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+					    ndlp, NULL);
+			return ndlp->nlp_state;
+		}
+	} else {
+		/* Initiator mode. */
+		lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	}
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	LPFC_MBOXQ_t	  *mb;
+	LPFC_MBOXQ_t	  *nextmb;
+	struct lpfc_dmabuf *mp;
+
+	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+	if ((mb = phba->sli.mbox_active)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+			lpfc_nlp_put(ndlp);
+			mb->context2 = NULL;
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+	}
+
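+	/* Also flush any matching REG_LOGIN still queued on the mboxq */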
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+			lpfc_nlp_put(ndlp);
+			list_del(&mb->list);
+			phba->sli.mboxq_cnt--;
+			mempool_free(mb, phba->mbox_mem_pool);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
+			       struct lpfc_nodelist *ndlp,
+			       void *arg,
+			       uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
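+/*
+ * REG_LOGIN completed while in REG_LOGIN_ISSUE state.  On mailbox
+ * failure, park the node in NPR and retry with a delayed PLOGI (no
+ * retry if the HBA is out of RPI resources).  On success, decide
+ * which FC4 type(s) to PRLI, querying the nameserver via GFT_ID when
+ * the type is still unknown.
+ */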
+static uint32_t
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+				  struct lpfc_nodelist *ndlp,
+				  void *arg,
+				  uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint32_t did  = mb->un.varWords[1];
+	int rc = 0;
+
+	if (mb->mbxStatus) {
+		/* RegLogin failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				"0246 RegLogin failed Data: x%x x%x x%x x%x "
+				 "x%x\n",
+				 did, mb->mbxStatus, vport->port_state,
+				 mb->un.varRegLogin.vpi,
+				 mb->un.varRegLogin.rpi);
+		/*
+		 * If RegLogin failed due to lack of HBA resources do not
+		 * retry discovery.
+		 */
+		if (mb->mbxStatus == MBXERR_RPI_FULL) {
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+			return ndlp->nlp_state;
+		}
+
+		/* Put ndlp in npr state set plogi timer for 1 sec */
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000 * 1));
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+
+		lpfc_issue_els_logo(vport, ndlp, 0);
+		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		return ndlp->nlp_state;
+	}
+
+	/* SLI4 ports have preallocated logical rpis. */
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+
+	/* Only if we are not a fabric nport do we issue PRLI */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "3066 RegLogin Complete on x%x x%x x%x\n",
+			 did, ndlp->nlp_type, ndlp->nlp_fc4_type);
+	if (!(ndlp->nlp_type & NLP_FABRIC) &&
+	    (phba->nvmet_support == 0)) {
+		/* The driver supports FCP and NVME concurrently.  If the
+		 * ndlp's nlp_fc4_type is still zero, the driver doesn't
+		 * know what PRLI to send yet.  Figure that out now and
+		 * call PRLI depending on the outcome.
+		 */
+		if (vport->fc_flag & FC_PT2PT) {
+			/* If we are pt2pt, there is no Fabric to determine
+			 * the FC4 type of the remote nport. So if NVME
+			 * is configured try it.
+			 */
+			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+			     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+				/* We need to update the localport also */
+				lpfc_nvme_update_localport(vport);
+			}
+
+		} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
+		} else if (ndlp->nlp_fc4_type == 0) {
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
+					 0, ndlp->nlp_DID);
+			return ndlp->nlp_state;
+		}
+
+		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+		lpfc_issue_els_prli(vport, ndlp, 0);
+	} else {
+		if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
+			phba->targetport->port_id = vport->fc_myDID;
+
+		/* Only Fabric ports should transition. NVME target
+		 * must complete PRLI.
+		 */
+		if (ndlp->nlp_type & NLP_FABRIC) {
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+static uint32_t
+lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
+				 struct lpfc_nodelist *ndlp,
+				 void *arg,
+				 uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+
+	/* If we are a target we won't immediately transition into PRLI,
+	 * so if REG_LOGIN already completed we don't need to ignore it.
+	 */
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
+	    !vport->phba->nvmet_support)
+		ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
+
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* Software abort outstanding PRLI before sending acc */
+	lpfc_els_abort(vport->phba, ndlp);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+/* This routine is invoked when we rcv a PRLO request from a nport
+ * we are logged into.  We should send back a PRLO rsp setting the
+ * appropriate bits.
+ * NEXT STATE = PRLI_ISSUE
+ */
+static uint32_t
+lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	struct lpfc_hba   *phba = vport->phba;
+	IOCB_t *irsp;
+	PRLI *npr;
+	struct lpfc_nvme_prli *nvpr;
+	void *temp_ptr;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	/* A solicited PRLI is either FCP or NVME.  The PRLI cmd/rsp
+	 * format is different so NULL the two PRLI types so that the
+	 * driver correctly gets the correct context.
+	 */
+	npr = NULL;
+	nvpr = NULL;
+	temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+	if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
+		npr = (PRLI *) temp_ptr;
+	else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
+		nvpr = (struct lpfc_nvme_prli *) temp_ptr;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus) {
+		if ((vport->port_type == LPFC_NPIV_PORT) &&
+		    vport->cfg_restrict_login) {
+			goto out;
+		}
+
+		/* The rport rejected the FCP PRLI as unsupported.
+		 * This should only happen in Pt2Pt, so an NVME PRLI
+		 * should still be outstanding.
+		 */
+		if (npr && ndlp->nlp_flag & NLP_FCP_PRLI_RJT) {
+			ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
+			goto out_err;
+		}
+
+		/* The LS Req had some error.  Don't let this be a
+		 * target.
+		 */
+		if ((ndlp->fc4_prli_sent == 1) &&
+		    (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
+		    (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
+			/* The FCP PRLI completed successfully but
+			 * the NVME PRLI failed.  Since they are sent in
+			 * succession, allow the FCP to complete.
+			 */
+			goto out_err;
+
+		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+		ndlp->nlp_type |= NLP_FCP_INITIATOR;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		return ndlp->nlp_state;
+	}
+
+	if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+	    (npr->prliType == PRLI_FCP_TYPE)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+				 "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
+				 npr->initiatorFunc,
+				 npr->targetFunc);
+		if (npr->initiatorFunc)
+			ndlp->nlp_type |= NLP_FCP_INITIATOR;
+		if (npr->targetFunc) {
+			ndlp->nlp_type |= NLP_FCP_TARGET;
+			if (npr->writeXferRdyDis)
+				ndlp->nlp_flag |= NLP_FIRSTBURST;
+		}
+		if (npr->Retry)
+			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+	} else if (nvpr &&
+		   (bf_get_be32(prli_acc_rsp_code, nvpr) ==
+		    PRLI_REQ_EXECUTED) &&
+		   (bf_get_be32(prli_type_code, nvpr) ==
+		    PRLI_NVME_TYPE)) {
+
+		/* Complete setting up the remote ndlp personality. */
+		if (bf_get_be32(prli_init, nvpr))
+			ndlp->nlp_type |= NLP_NVME_INITIATOR;
+
+		/* Target driver cannot solicit NVME FB. */
+		if (bf_get_be32(prli_tgt, nvpr)) {
+			/* Complete the nvme target roles.  The transport
+			 * needs to know if the rport is capable of
+			 * discovery in addition to its role.
+			 */
+			ndlp->nlp_type |= NLP_NVME_TARGET;
+			if (bf_get_be32(prli_disc, nvpr))
+				ndlp->nlp_type |= NLP_NVME_DISCOVERY;
+			if ((bf_get_be32(prli_fba, nvpr) == 1) &&
+			    (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
+			    (phba->cfg_nvme_enable_fb) &&
+			    (!phba->nvmet_support)) {
+				/* Both sides support FB. The target's first
+				 * burst size is a 512 byte encoded value.
+				 */
+				ndlp->nlp_flag |= NLP_FIRSTBURST;
+				ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
+								 nvpr);
+			}
+		}
+
+		if (bf_get_be32(prli_recov, nvpr))
+			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+				 "6029 NVME PRLI Cmpl w1 x%08x "
+				 "w4 x%08x w5 x%08x flag x%x, "
+				 "fcp_info x%x nlp_type x%x\n",
+				 be32_to_cpu(nvpr->word1),
+				 be32_to_cpu(nvpr->word4),
+				 be32_to_cpu(nvpr->word5),
+				 ndlp->nlp_flag, ndlp->nlp_fcp_info,
+				 ndlp->nlp_type);
+	}
+	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (vport->port_type == LPFC_NPIV_PORT) &&
+	     vport->cfg_restrict_login) {
+out:
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_issue_els_logo(vport, ndlp, 0);
+
+		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		return ndlp->nlp_state;
+	}
+
+out_err:
+	/* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
+	 * are complete.
+	 */
+	if (ndlp->fc4_prli_sent == 0) {
+		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+		if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+		else if (ndlp->nlp_type &
+			 (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	} else
+		lpfc_printf_vlog(vport,
+				 KERN_INFO, LOG_ELS,
+				 "3067 PRLI's still outstanding "
+				 "on x%06x - count %d, Pend Node Mode "
+				 "transition...\n",
+				 ndlp->nlp_DID, ndlp->fc4_prli_sent);
+
+	return ndlp->nlp_state;
+}
+
+/*! lpfc_device_rm_prli_issue
+ *
+ * \pre
+ * \post
+ * \param   phba
+ * \param   ndlp
+ * \param   arg
+ * \param   evt
+ * \return  uint32_t
+ *
+ * \b Description:
+ *    This routine is invoked when we receive a request to remove an nport
+ *    we are in the process of PRLIing.  We should software abort the
+ *    outstanding PRLI, unreg login, and send a logout.  We will change the
+ *    node state to UNUSED_NODE and put it on the plogi list so it can be
+ *    freed when the LOGO completes.
+ */
+
+static uint32_t
+lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		/* software abort outstanding PRLI */
+		lpfc_els_abort(vport->phba, ndlp);
+
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+
+/*! lpfc_device_recov_prli_issue
+ *
+ * \pre
+ * \post
+ * \param   phba
+ * \param   ndlp
+ * \param   arg
+ * \param   evt
+ * \return  uint32_t
+ *
+ * \b Description:
+ *    This routine is invoked when the state of a device is unknown, such as
+ *    during a link down.  We should remove the nodelist entry from the
+ *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
+ *    outstanding PRLI command, then free the node entry.
+ */
+static uint32_t
+lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	/* software abort outstanding PRLI */
+	lpfc_els_abort(phba, ndlp);
+
+	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	/*
+	 * DevLoss has timed out and is calling for Device Remove.
+	 * In this case, abort the LOGO and cleanup the ndlp
+	 */
+
+	lpfc_unreg_rpi(vport, ndlp);
+	/* software abort outstanding LOGO */
+	lpfc_els_abort(vport->phba, ndlp);
+	lpfc_drop_node(vport, ndlp);
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg, uint32_t evt)
+{
+	/*
+	 * Device Recovery events have no meaning for a node with a LOGO
+	 * outstanding.  The LOGO has to complete first and handle the
+	 * node from that point.
+	 */
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_prli(vport, ndlp, cmdiocb);
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* flush the target */
+	lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
+			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+
+	/* Treat like rcv logo */
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
+
+	/* Ignore PLOGI if we have an outstanding LOGO */
+	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
+		return ndlp->nlp_state;
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+		spin_unlock_irq(shost->host_lock);
+	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		/* send PLOGI immediately, move to PLOGI issue state */
+		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		       void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, 0);
+		} else {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
+		       void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	/*
+	 * Do not start discovery if discovery is about to start
+	 * or already in progress for this node. Starting discovery
+	 * here will affect the counting of discovery threads.
+	 */
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, 0);
+		} else {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		       void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
+		mod_timer(&ndlp->nlp_delayfunc,
+			  jiffies + msecs_to_jiffies(1000 * 1));
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	} else {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DEFER_RM;
+		spin_unlock_irq(shost->host_lock);
+		return NLP_STE_FREED_NODE;
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/* For the fabric port just clear the fc flags. */
+	if (ndlp->nlp_DID == Fabric_DID) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+	}
+	lpfc_unreg_rpi(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t    *mb = &pmb->u.mb;
+
+	if (!mb->mbxStatus) {
+		/* SLI4 ports have preallocated logical rpis. */
+		if (vport->phba->sli_rev < LPFC_SLI_REV4)
+			ndlp->nlp_rpi = mb->un.varWords[0];
+		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+			lpfc_unreg_rpi(vport, ndlp);
+		}
+	} else {
+		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
+			lpfc_drop_node(vport, ndlp);
+			return NLP_STE_FREED_NODE;
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	}
+	lpfc_drop_node(vport, ndlp);
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp->nlp_state;
+}
+
+
+/* This next section defines the NPort Discovery State Machine */
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The plogi list and adisc list are used when Link Up discovery or RSCN
+ * processing is needed. Each list holds the nodes that we will send PLOGI
+ * or ADISC on. These lists will keep track of what nodes will be affected
+ * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
+ * The unmapped_list will contain all nodes that we have successfully logged
+ * into at the Fibre Channel level. The mapped_list will contain all nodes
+ * that are mapped FCP targets.
+ */
+/*
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list.  32 entries are processed initially and
+ * ADISC is initited for each one.  Completions / Events for each node are
+ * funnelled thru the state machine.  As each node finishes ADISC processing, it
+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list.  32 entries are processed initially and PLOGI is initiated for each one.
+ * Completions / Events for each node are funneled through the state machine.  As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
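+
+/* Illustrative sketch (an editor's note, not driver logic): the table below
+ * is a flat [state][event] jump table.  An event arriving for a node is
+ * dispatched by computing one index; for example, a RCV_PRLI event for a
+ * node in PRLI_ISSUE state selects lpfc_rcv_prli_prli_issue:
+ *
+ *   idx  = (NLP_STE_PRLI_ISSUE * NLP_EVT_MAX_EVENT) + NLP_EVT_RCV_PRLI;
+ *   func = lpfc_disc_action[idx];
+ *
+ * lpfc_disc_state_machine() below performs exactly this lookup.
+ */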
+
+static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
+     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
+	/* Action routine                  Event       Current State  */
+	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
+	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
+	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
+	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
+	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
+	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
+	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
+	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
+	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
+	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
+	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
+	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
+	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
+	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
+	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
+	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
+	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
+	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_disc_illegal,		/* DEVICE_RM       */
+	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
+	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
+	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_disc_illegal,		/* DEVICE_RM       */
+	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
+	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
+	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
+	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
+	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
+	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
+	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
+	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
+	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
+	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
+	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
+	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
+	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
+};
+
+int
+lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	uint32_t cur_state, rc;
+	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
+			 uint32_t);
+	uint32_t got_ndlp = 0;
+
+	if (lpfc_nlp_get(ndlp))
+		got_ndlp = 1;
+
+	cur_state = ndlp->nlp_state;
+
+	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0211 DSM in event x%x on NPort x%x in "
+			 "state %d Data: x%x x%x\n",
+			 evt, ndlp->nlp_DID, cur_state,
+			 ndlp->nlp_flag, ndlp->nlp_fc4_type);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+		 "DSM in:          evt:%d ste:%d did:x%x",
+		evt, cur_state, ndlp->nlp_DID);
+
+	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
+	rc = (func) (vport, ndlp, arg, evt);
+
+	/* DSM out state <rc> on NPort <nlp_DID> */
+	if (got_ndlp) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
+			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+			"DSM out:         ste:%d did:x%x flg:x%x",
+			rc, ndlp->nlp_DID, ndlp->nlp_flag);
+		/* Decrement the ndlp reference count held for this function */
+		lpfc_nlp_put(ndlp);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			"0213 DSM out state %d on NPort free\n", rc);
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+			"DSM out:         ste:%d did:x%x flg:x%x",
+			rc, 0, 0);
+	}
+
+	return rc;
+}
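+
+/* Typical invocation, sketched for illustration only: discovery and ELS
+ * handlers feed received frames and completions through this entry point,
+ * e.g.
+ *
+ *   lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
+ */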
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvme.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvme.c
new file mode 100644
index 0000000..fcf4b41
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvme.c
@@ -0,0 +1,2510 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ ********************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme.h>
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_nvme.h"
+#include "lpfc_scsi.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* NVME initiator-based functions */
+
+static struct lpfc_nvme_buf *
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+
+static void
+lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+
+
+/**
+ * lpfc_nvme_create_queue - Allocate and bind a qhandle for an IO queue
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
+ * @qsize: Requested queue size (not used by this routine).
+ * @handle: An opaque driver handle used in follow-up calls.
+ *
+ * Driver registers this routine to preallocate and initialize any
+ * internal data structures to bind the @qidx to its internal IO queues.
+ * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
+ *
+ * Return value :
+ *   0 - Success
+ *   -EINVAL - Unsupported input value.
+ *   -ENOMEM - Could not alloc necessary memory
+ **/
+static int
+lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
+		       unsigned int qidx, u16 qsize,
+		       void **handle)
+{
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_vport *vport;
+	struct lpfc_nvme_qhandle *qhandle;
+	char *str;
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	vport = lport->vport;
+	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
+	if (qhandle == NULL)
+		return -ENOMEM;
+
+	qhandle->cpu_id = smp_processor_id();
+	qhandle->qidx = qidx;
+	/*
+	 * NVME qidx == 0 is the admin queue, so both admin queue
+	 * and first IO queue will use MSI-X vector and associated
+	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
+	 */
+	if (qidx) {
+		str = "IO ";  /* IO queue */
+		qhandle->index = ((qidx - 1) %
+			vport->phba->cfg_nvme_io_channel);
+	} else {
+		str = "ADM";  /* Admin queue */
+		qhandle->index = qidx;
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
+			 "io_channel %d qhandle %p\n", str,
+			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
+	*handle = (void *)qhandle;
+	return 0;
+}
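+
+/* Editor's illustration (assuming cfg_nvme_io_channel == 4): qidx 0 is the
+ * admin queue and shares index 0 with the first IO queue; IO queue handles
+ * wrap modulo the io_channel count:
+ *
+ *   qidx:   0(ADM)  1   2   3   4   5
+ *   index:  0       0   1   2   3   0
+ */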
+
+/**
+ * lpfc_nvme_delete_queue - Free the qhandle for an IO queue
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
+ * @handle: An opaque driver handle from lpfc_nvme_create_queue
+ *
+ * Driver registers this routine to free
+ * any internal data structures to bind the @qidx to its internal
+ * IO queues.
+ *
+ * Return value :
+ *   None
+ **/
+static void
+lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
+		       unsigned int qidx,
+		       void *handle)
+{
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_vport *vport;
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	vport = lport->vport;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+			"6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
+			lport, qidx, handle);
+	kfree(handle);
+}
+
+static void
+lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
+{
+	struct lpfc_nvme_lport *lport = localport->private;
+
+	/* release any threads waiting for the unreg to complete */
+	complete(&lport->lport_unreg_done);
+}
+
+/* lpfc_nvme_remoteport_delete
+ *
+ * @remoteport: Pointer to an nvme transport remoteport instance.
+ *
+ * This is a template downcall.  NVME transport calls this function
+ * when it has completed the unregistration of a previously
+ * registered remoteport.
+ *
+ * Return value :
+ * None
+ */
+void
+lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+	struct lpfc_nvme_rport *rport = remoteport->private;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = rport->ndlp;
+	if (!ndlp)
+		goto rport_err;
+
+	vport = ndlp->vport;
+	if (!vport)
+		goto rport_err;
+
+	/* Remove this rport from the lport's list - memory is owned by the
+	 * transport. Remove the ndlp reference for the NVME transport before
+	 * calling state machine to remove the node.
+	 */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			"6146 remoteport delete complete %p\n",
+			remoteport);
+	ndlp->nrport = NULL;
+	lpfc_nlp_put(ndlp);
+
+ rport_err:
+	/* This call has to execute as long as the rport is valid.
+	 * Release any threads waiting for the unreg to complete.
+	 */
+	complete(&rport->rport_unreg_done);
+}
+
+static void
+lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+		       struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_vport *vport = cmdwqe->vport;
+	uint32_t status;
+	struct nvmefc_ls_req *pnvme_lsreq;
+	struct lpfc_dmabuf *buf_ptr;
+	struct lpfc_nodelist *ndlp;
+
+	atomic_inc(&vport->phba->fc4NvmeLsCmpls);
+
+	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
+	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6047 nvme cmpl Enter "
+			 "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p "
+			 "bmp:%p ndlp:%p\n",
+			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+			 cmdwqe->sli4_xritag, status,
+			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+
+	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
+			 cmdwqe->sli4_xritag, status, wcqe->parameter);
+
+	if (cmdwqe->context3) {
+		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+		cmdwqe->context3 = NULL;
+	}
+	if (pnvme_lsreq->done)
+		pnvme_lsreq->done(pnvme_lsreq, status);
+	else
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+				 "6046 nvme cmpl without done call back? "
+				 "Data %p DID %x Xri: %x status %x\n",
+				pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+				cmdwqe->sli4_xritag, status);
+	if (ndlp) {
+		lpfc_nlp_put(ndlp);
+		cmdwqe->context1 = NULL;
+	}
+	lpfc_sli_release_iocbq(phba, cmdwqe);
+}
+
+static int
+lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+		  struct lpfc_dmabuf *inp,
+		 struct nvmefc_ls_req *pnvme_lsreq,
+	     void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_wcqe_complete *),
+	     struct lpfc_nodelist *ndlp, uint32_t num_entry,
+	     uint32_t tmo, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	union lpfc_wqe *wqe;
+	struct lpfc_iocbq *genwqe;
+	struct ulp_bde64 *bpl;
+	struct ulp_bde64 bde;
+	int i, rc, xmit_len, first_len;
+
+	/* Allocate buffer for  command WQE */
+	genwqe = lpfc_sli_get_iocbq(phba);
+	if (genwqe == NULL)
+		return 1;
+
+	wqe = &genwqe->wqe;
+	memset(wqe, 0, sizeof(union lpfc_wqe));
+
+	genwqe->context3 = (uint8_t *)bmp;
+	genwqe->iocb_flag |= LPFC_IO_NVME_LS;
+
+	/* Save for completion so we can release these resources */
+	genwqe->context1 = lpfc_nlp_get(ndlp);
+	genwqe->context2 = (uint8_t *)pnvme_lsreq;
+	/* Fill in payload, bp points to frame payload */
+
+	if (!tmo)
+		/* FC spec states we need 3 * ratov for CT requests */
+		tmo = (3 * phba->fc_ratov);
+
+	/* For this command calculate the xmit length of the request bde. */
+	xmit_len = 0;
+	first_len = 0;
+	bpl = (struct ulp_bde64 *)bmp->virt;
+	for (i = 0; i < num_entry; i++) {
+		bde.tus.w = bpl[i].tus.w;
+		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+			break;
+		xmit_len += bde.tus.f.bdeSize;
+		if (i == 0)
+			first_len = xmit_len;
+	}
+
+	genwqe->rsvd2 = num_entry;
+	genwqe->hba_wqidx = 0;
+
+	/* Words 0 - 2 */
+	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	wqe->generic.bde.tus.f.bdeSize = first_len;
+	wqe->generic.bde.addrLow = bpl[0].addrLow;
+	wqe->generic.bde.addrHigh = bpl[0].addrHigh;
+
+	/* Word 3 */
+	wqe->gen_req.request_payload_len = first_len;
+
+	/* Word 4 */
+
+	/* Word 5 */
+	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
+	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
+	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
+	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
+	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
+
+	/* Word 6 */
+	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
+	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
+
+	/* Word 7 */
+	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
+	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
+	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
+	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
+
+	/* Word 8 */
+	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
+
+	/* Word 9 */
+	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
+
+	/* Word 10 */
+	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+
+	/* Word 11 */
+	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
+
+
+	/* Issue GEN REQ WQE for NPORT <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "6050 Issue GEN REQ WQE to NPORT x%x "
+			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+			 ndlp->nlp_DID, genwqe->iotag,
+			 vport->port_state,
+			genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
+	genwqe->wqe_cmpl = cmpl;
+	genwqe->iocb_cmpl = NULL;
+	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
+	genwqe->vport = vport;
+	genwqe->retry = retry;
+
+	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
+			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
+
+	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
+	if (rc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "6045 Issue GEN REQ WQE to NPORT x%x "
+				 "Data: x%x x%x\n",
+				 ndlp->nlp_DID, genwqe->iotag,
+				 vport->port_state);
+		lpfc_sli_release_iocbq(phba, genwqe);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_nvme_ls_req - Issue a Link Service request
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport receiving the request
+ * @pnvme_lsreq: Pointer to the link service request to send
+ *
+ * Driver registers this routine to handle any link service request
+ * from the nvme_fc transport to a remote nvme-aware port.
+ *
+ * Return value :
+ *   0 - Success
+ *   TODO: What are the failure codes.
+ **/
+static int
+lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+		 struct nvme_fc_remote_port *pnvme_rport,
+		 struct nvmefc_ls_req *pnvme_lsreq)
+{
+	int ret = 0;
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct ulp_bde64 *bpl;
+	struct lpfc_dmabuf *bmp;
+	uint16_t ntype, nstate;
+
+	/* There are two dma bufs in the request; in practice there is one,
+	 * and the second is just the start address + cmd size.
+	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
+	 * in a lpfc_dmabuf struct. When freeing, we free only the wrapper
+	 * because the nvme layer owns the data bufs.
+	 * We do not have to break these packets open; we don't care what is
+	 * in them. And we do not have to look at the response data; we only
+	 * care that we got a response. All of the caring happens in the
+	 * nvme-fc layer.
+	 */
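+
+	/* Resulting BPL layout, sketched for illustration (built below from
+	 * the transport's DMA addresses; sizes come from the lsreq):
+	 *
+	 *   bpl[0]: request  - addr rqstdma, size rqstlen, 64-bit BDE
+	 *   bpl[1]: response - addr rspdma,  size rsplen,  BUFF_TYPE_BDE_64I
+	 */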
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	vport = lport->vport;
+
+	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+				 "6051 DID x%06x not an active rport.\n",
+				 pnvme_rport->port_id);
+		return -ENODEV;
+	}
+
+	/* The remote node has to be a mapped nvme target or an
+	 * unmapped nvme initiator or it's an error.
+	 */
+	ntype = ndlp->nlp_type;
+	nstate = ndlp->nlp_state;
+	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
+	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+				 "6088 DID x%06x not ready for "
+				 "IO. State x%x, Type x%x\n",
+				 pnvme_rport->port_id,
+				 ndlp->nlp_state, ndlp->nlp_type);
+		return -ENODEV;
+	}
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+				 "6044 Could not alloc LS request bmp "
+				 "for DID %x\n",
+				 pnvme_rport->port_id);
+		return 2;
+	}
+	INIT_LIST_HEAD(&bmp->list);
+	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
+	if (!bmp->virt) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+				 "6042 Could not alloc mbuf for LS request, "
+				 "DID %x\n",
+				 pnvme_rport->port_id);
+		kfree(bmp);
+		return 3;
+	}
+	bpl = (struct ulp_bde64 *)bmp->virt;
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
+	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
+	bpl->tus.f.bdeFlags = 0;
+	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+	bpl++;
+
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
+	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
+	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	/* Expand print to include key fields. */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6149 ENTER.  lport %p, rport %p lsreq%p rqstlen:%d "
+			 "rsplen:%d %pad %pad\n",
+			 pnvme_lport, pnvme_rport,
+			 pnvme_lsreq, pnvme_lsreq->rqstlen,
+			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+			 &pnvme_lsreq->rspdma);
+
+	atomic_inc(&vport->phba->fc4NvmeLsRequests);
+
+	/* Hardcode the wait to 30 seconds; connections fail with shorter
+	 * timeouts.
+	 */
+	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
+				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
+				ndlp, 2, 30, 0);
+	if (ret != WQE_SUCCESS) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+				 "6052 EXIT. issue ls wqe failed lport %p, "
+				 "rport %p lsreq%p Status %x DID %x\n",
+				 pnvme_lport, pnvme_rport, pnvme_lsreq,
+				 ret, ndlp->nlp_DID);
+		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
+		kfree(bmp);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * lpfc_nvme_ls_abort - Abort a prior Link Service request
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport that issued the @pnvme_lsreq
+ * @pnvme_lsreq: Pointer to the link service request to abort
+ *
+ * Driver registers this routine to abort a previously issued link
+ * service request to a remote nvme-aware port.
+ *
+ * Return value :
+ *   None
+ **/
+static void
+lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
+		   struct nvme_fc_remote_port *pnvme_rport,
+		   struct nvmefc_ls_req *pnvme_lsreq)
+{
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	struct lpfc_nodelist *ndlp;
+	LIST_HEAD(abort_list);
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *wqe, *next_wqe;
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	vport = lport->vport;
+	phba = vport->phba;
+
+	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+	if (!ndlp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6049 Could not find node for DID %x\n",
+				 pnvme_rport->port_id);
+		return;
+	}
+
+	/* Expand print to include key fields. */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
+			 "rsplen:%d %pad %pad\n",
+			 pnvme_lport, pnvme_rport,
+			 pnvme_lsreq, pnvme_lsreq->rqstlen,
+			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+			 &pnvme_lsreq->rspdma);
+
+	/*
+	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
+	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
+	 * the abort operation completes them successfully.
+	 */
+	pring = phba->sli4_hba.nvmels_wq->pring;
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&pring->ring_lock);
+	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
+		/* Add to abort_list on NDLP match. */
+		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
+			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+			list_add_tail(&wqe->dlist, &abort_list);
+		}
+	}
+	spin_unlock(&pring->ring_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Abort the targeted IOs and remove them from the abort list. */
+	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&wqe->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
+		spin_unlock_irq(&phba->hbalock);
+	}
+}
+
+/* Fix up the existing sgls for NVME IO. */
+static void
+lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
+		       struct lpfc_nvme_buf *lpfc_ncmd,
+		       struct nvmefc_fcp_req *nCmd)
+{
+	struct sli4_sge *sgl;
+	union lpfc_wqe128 *wqe;
+	uint32_t *wptr, *dptr;
+
+	/*
+	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
+	 * match NVME.  NVME sends 96 bytes. Also, use the
+	 * nvme commands command and response dma addresses
+	 * rather than the virtual memory to ease the restore
+	 * operation.
+	 */
+	sgl = lpfc_ncmd->nvme_sgl;
+	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
+
+	sgl++;
+
+	/* Setup the physical region for the FCP RSP */
+	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
+	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
+	sgl->word2 = le32_to_cpu(sgl->word2);
+	if (nCmd->sg_cnt)
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+	else
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+	sgl->word2 = cpu_to_le32(sgl->word2);
+	sgl->sge_len = cpu_to_le32(nCmd->rsplen);
+
+	/*
+	 * Get a local pointer to the built-in wqe and correct
+	 * the cmd size to match NVME's 96 bytes and fix
+	 * the dma address.
+	 */
+
+	/* 128 byte wqe support here */
+	wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
+
+	/* Word 0-2 - NVME CMND IU (embedded payload) */
+	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
+	wqe->generic.bde.tus.f.bdeSize = 60;
+	wqe->generic.bde.addrHigh = 0;
+	wqe->generic.bde.addrLow =  64;  /* Word 16 */
+
+	/* Word 3 */
+	bf_set(payload_offset_len, &wqe->fcp_icmd,
+	       (nCmd->rsplen + nCmd->cmdlen));
+
+	/* Word 10 */
+	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
+	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
+
+	/*
+	 * Embed the payload in the last half of the WQE
+	 * WQE words 16-30 get the NVME CMD IU payload
+	 *
+	 * WQE words 16-19 get payload Words 1-4
+	 * WQE words 20-21 get payload Words 6-7
+	 * WQE words 22-29 get payload Words 16-23
+	 */
+	wptr = &wqe->words[16];  /* WQE ptr */
+	dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
+	dptr++;			/* Skip Word 0 in payload */
+
+	*wptr++ = *dptr++;	/* Word 1 */
+	*wptr++ = *dptr++;	/* Word 2 */
+	*wptr++ = *dptr++;	/* Word 3 */
+	*wptr++ = *dptr++;	/* Word 4 */
+	dptr++;			/* Skip Word 5 in payload */
+	*wptr++ = *dptr++;	/* Word 6 */
+	*wptr++ = *dptr++;	/* Word 7 */
+	dptr += 8;		/* Skip Words 8-15 in payload */
+	*wptr++ = *dptr++;	/* Word 16 */
+	*wptr++ = *dptr++;	/* Word 17 */
+	*wptr++ = *dptr++;	/* Word 18 */
+	*wptr++ = *dptr++;	/* Word 19 */
+	*wptr++ = *dptr++;	/* Word 20 */
+	*wptr++ = *dptr++;	/* Word 21 */
+	*wptr++ = *dptr++;	/* Word 22 */
+	*wptr   = *dptr;	/* Word 23 */
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+static void
+lpfc_nvme_ktime(struct lpfc_hba *phba,
+		struct lpfc_nvme_buf *lpfc_ncmd)
+{
+	uint64_t seg1, seg2, seg3, seg4;
+
+	if (!phba->ktime_on)
+		return;
+	if (!lpfc_ncmd->ts_last_cmd ||
+	    !lpfc_ncmd->ts_cmd_start ||
+	    !lpfc_ncmd->ts_cmd_wqput ||
+	    !lpfc_ncmd->ts_isr_cmpl ||
+	    !lpfc_ncmd->ts_data_nvme)
+		return;
+	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
+		return;
+	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
+		return;
+	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
+		return;
+	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
+		return;
+	/*
+	 * Segment 1 - Time from Last FCP command cmpl is handed
+	 * off to NVME Layer to start of next command.
+	 * Segment 2 - Time from Driver receives a IO cmd start
+	 * from NVME Layer to WQ put is done on IO cmd.
+	 * Segment 3 - Time from Driver WQ put is done on IO cmd
+	 * to MSI-X ISR for IO cmpl.
+	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
+	 * cmpl is handled off to the NVME Layer.
+	 */
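+	/* Worked example with hypothetical timestamps (ns), for
+	 * illustration only:
+	 *   ts_last_cmd = 1000, ts_cmd_start = 1400, ts_cmd_wqput = 1600,
+	 *   ts_isr_cmpl = 2600, ts_data_nvme = 2900
+	 * gives seg1 = 400, seg2 = 200, seg3 = 1000, seg4 = 300.
+	 */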
+	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
+	if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
+		return;
+
+	/* Calculate times relative to start of IO */
+	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
+	seg3 = (lpfc_ncmd->ts_isr_cmpl -
+		lpfc_ncmd->ts_cmd_start) - seg2;
+	seg4 = (lpfc_ncmd->ts_data_nvme -
+		lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
+	phba->ktime_data_samples++;
+	phba->ktime_seg1_total += seg1;
+	if (seg1 < phba->ktime_seg1_min)
+		phba->ktime_seg1_min = seg1;
+	else if (seg1 > phba->ktime_seg1_max)
+		phba->ktime_seg1_max = seg1;
+	phba->ktime_seg2_total += seg2;
+	if (seg2 < phba->ktime_seg2_min)
+		phba->ktime_seg2_min = seg2;
+	else if (seg2 > phba->ktime_seg2_max)
+		phba->ktime_seg2_max = seg2;
+	phba->ktime_seg3_total += seg3;
+	if (seg3 < phba->ktime_seg3_min)
+		phba->ktime_seg3_min = seg3;
+	else if (seg3 > phba->ktime_seg3_max)
+		phba->ktime_seg3_max = seg3;
+	phba->ktime_seg4_total += seg4;
+	if (seg4 < phba->ktime_seg4_min)
+		phba->ktime_seg4_min = seg4;
+	else if (seg4 > phba->ktime_seg4_max)
+		phba->ktime_seg4_max = seg4;
+
+	lpfc_ncmd->ts_last_cmd = 0;
+	lpfc_ncmd->ts_cmd_start = 0;
+	lpfc_ncmd->ts_cmd_wqput  = 0;
+	lpfc_ncmd->ts_isr_cmpl = 0;
+	lpfc_ncmd->ts_data_nvme = 0;
+}
+#endif
+
+/**
+ * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
+ * @phba: Pointer to the driver's hba instance data
+ * @pwqeIn: Pointer to the completed command WQE
+ * @wcqe: Pointer to the work-queue completion entry
+ *
+ * Completion handler for NVME IO commands.  This routine translates
+ * the WCQE status into the nvmefc request, rebuilds the ERSP IU when
+ * required, and releases the driver's IO resources.
+ *
+ * Return value :
+ *   None
+ **/
+static void
+lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+			  struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd =
+		(struct lpfc_nvme_buf *)pwqeIn->context1;
+	struct lpfc_vport *vport = pwqeIn->vport;
+	struct nvmefc_fcp_req *nCmd;
+	struct nvme_fc_ersp_iu *ep;
+	struct nvme_fc_cmd_iu *cp;
+	struct lpfc_nvme_rport *rport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_nvme_fcpreq_priv *freqpriv;
+	unsigned long flags;
+	uint32_t code;
+	uint16_t cid, sqhd, data;
+	uint32_t *ptr;
+
+	/* Sanity check on return of outstanding command */
+	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+				 "6071 Completion pointers bad on wqe %p.\n",
+				 wcqe);
+		return;
+	}
+	atomic_inc(&phba->fc4NvmeIoCmpls);
+
+	nCmd = lpfc_ncmd->nvmeCmd;
+	rport = lpfc_ncmd->nrport;
+
+	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
+			 lpfc_ncmd->cur_iocbq.sli4_xritag,
+			 bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+	/*
+	 * Catch race where our node has transitioned, but the
+	 * transport is still transitioning.
+	 */
+	ndlp = rport->ndlp;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+				 "6061 rport %p,  DID x%06x node not ready.\n",
+				 rport, rport->remoteport->port_id);
+
+		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
+		if (!ndlp) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+					 "6062 Ignoring NVME cmpl.  No ndlp\n");
+			goto out_err;
+		}
+	}
+
+	code = bf_get(lpfc_wcqe_c_code, wcqe);
+	if (code == CQE_CODE_NVME_ERSP) {
+		/* For this type of CQE, we need to rebuild the rsp */
+		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
+		/*
+		 * Get Command Id from cmd to plug into response. This
+		 * code is not needed in the next NVME Transport drop.
+		 */
+		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+		cid = cp->sqe.common.command_id;
+
+		/*
+		 * RSN is in CQE word 2
+		 * SQHD is in CQE Word 3 bits 15:0
+		 * Cmd Specific info is in CQE Word 1
+		 * and in CQE Word 0 bits 15:0
+		 */
+		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
+
+		/* Now lets build the NVME ERSP IU */
+		ep->iu_len = cpu_to_be16(8);
+		ep->rsn = wcqe->parameter;
+		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
+		ep->rsvd12 = 0;
+		ptr = (uint32_t *)&ep->cqe.result.u64;
+		*ptr++ = wcqe->total_data_placed;
+		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
+		*ptr = (uint32_t)data;
+		ep->cqe.sq_head = sqhd;
+		ep->cqe.sq_id =  nCmd->sqid;
+		ep->cqe.command_id = cid;
+		ep->cqe.status = 0;
+
+		lpfc_ncmd->status = IOSTAT_SUCCESS;
+		lpfc_ncmd->result = 0;
+		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
+		nCmd->transferred_length = nCmd->payload_length;
+	} else {
+		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
+			    LPFC_IOCB_STATUS_MASK);
+		lpfc_ncmd->result = wcqe->parameter;
+
+		/* For NVME, the only failure path that results in an
+		 * IO error is when the adapter rejects it.  All other
+		 * conditions are a success case and resolved by the
+		 * transport.
+		 * IOSTAT_FCP_RSP_ERROR means:
+		 * 1. Length of data received doesn't match total
+		 *    transfer length in WQE
+		 * 2. If the RSP payload does NOT match these cases:
+		 *    a. RSP length 12/24 bytes and all zeros
+		 *    b. NVME ERSP
+		 */
+		switch (lpfc_ncmd->status) {
+		case IOSTAT_SUCCESS:
+			nCmd->transferred_length = wcqe->total_data_placed;
+			nCmd->rcv_rsplen = 0;
+			nCmd->status = 0;
+			break;
+		case IOSTAT_FCP_RSP_ERROR:
+			nCmd->transferred_length = wcqe->total_data_placed;
+			nCmd->rcv_rsplen = wcqe->parameter;
+			nCmd->status = 0;
+			/* Sanity check */
+			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
+				break;
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+					 "6081 NVME Completion Protocol Error: "
+					 "xri %x status x%x result x%x "
+					 "placed x%x\n",
+					 lpfc_ncmd->cur_iocbq.sli4_xritag,
+					 lpfc_ncmd->status, lpfc_ncmd->result,
+					 wcqe->total_data_placed);
+			break;
+		default:
+out_err:
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+					 "6072 NVME Completion Error: xri %x "
+					 "status x%x result x%x placed x%x\n",
+					 lpfc_ncmd->cur_iocbq.sli4_xritag,
+					 lpfc_ncmd->status, lpfc_ncmd->result,
+					 wcqe->total_data_placed);
+			nCmd->transferred_length = 0;
+			nCmd->rcv_rsplen = 0;
+			nCmd->status = NVME_SC_INTERNAL;
+		}
+	}
+
+	/* pick up SLI4 exchange busy condition */
+	if (bf_get(lpfc_wcqe_c_xb, wcqe))
+		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
+	else
+		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+		atomic_dec(&ndlp->cmd_pending);
+
+	/* Update stats and complete the IO.  There is
+	 * no need for dma unprep because the nvme_transport
+	 * owns the dma address.
+	 */
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on) {
+		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
+		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
+		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
+		lpfc_nvme_ktime(phba, lpfc_ncmd);
+	}
+	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+		if (lpfc_ncmd->cpu != smp_processor_id())
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+					 "6701 CPU Check cmpl: "
+					 "cpu %d expect %d\n",
+					 smp_processor_id(), lpfc_ncmd->cpu);
+		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
+			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+	}
+#endif
+	freqpriv = nCmd->private;
+	freqpriv->nvme_buf = NULL;
+	nCmd->done(nCmd);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	lpfc_ncmd->nrport = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	lpfc_release_nvme_buf(phba, lpfc_ncmd);
+}
+
+
+/**
+ * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
+ * @vport: Pointer to the driver's vport data
+ * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
+ * @pnode: Pointer to the node entry for the rport
+ *
+ * This routine initializes the command WQE for an NVME IO request
+ * using data from the @lpfc_ncmd buffer and the node indicated by
+ * @pnode.
+ *
+ * Return value :
+ *   0 - Success
+ *   -EINVAL - Node pointer is bad or inactive.
+ **/
+static int
+lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
+		      struct lpfc_nvme_buf *lpfc_ncmd,
+		      struct lpfc_nodelist *pnode)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
+	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
+	uint32_t req_len;
+
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return -EINVAL;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.
+	 */
+	wqe->fcp_iwrite.initial_xfer_len = 0;
+	if (nCmd->sg_cnt) {
+		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
+			/* Word 5 */
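+			/*
+			 * First burst: when enabled and the rport
+			 * negotiated it (NLP_FIRSTBURST), send up to
+			 * nvme_fb_size bytes of write data with the
+			 * command instead of waiting for transfer-ready.
+			 */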
+			if ((phba->cfg_nvme_enable_fb) &&
+			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
+				req_len = lpfc_ncmd->nvmeCmd->payload_length;
+				if (req_len < pnode->nvme_fb_size)
+					wqe->fcp_iwrite.initial_xfer_len =
+						req_len;
+				else
+					wqe->fcp_iwrite.initial_xfer_len =
+						pnode->nvme_fb_size;
+			}
+
+			/* Word 7 */
+			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
+			       CMD_FCP_IWRITE64_WQE);
+			bf_set(wqe_pu, &wqe->generic.wqe_com,
+			       PARM_READ_CHECK);
+
+			/* Word 10 */
+			bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
+			bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
+			       LPFC_WQE_IOD_WRITE);
+			bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+			       LPFC_WQE_LENLOC_WORD4);
+			if (phba->cfg_nvme_oas)
+				bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+
+			/* Word 11 */
+			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
+			       NVME_WRITE_CMD);
+
+			atomic_inc(&phba->fc4NvmeOutputRequests);
+		} else {
+			/* Word 7 */
+			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
+			       CMD_FCP_IREAD64_WQE);
+			bf_set(wqe_pu, &wqe->generic.wqe_com,
+			       PARM_READ_CHECK);
+
+			/* Word 10 */
+			bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
+			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+			       LPFC_WQE_IOD_READ);
+			bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+			       LPFC_WQE_LENLOC_WORD4);
+			if (phba->cfg_nvme_oas)
+				bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+
+			/* Word 11 */
+			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
+			       NVME_READ_CMD);
+
+			atomic_inc(&phba->fc4NvmeInputRequests);
+		}
+	} else {
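+		/*
+		 * No data to DMA: build an FCP_ICMND64 WQE carrying
+		 * only the embedded NVME command.
+		 */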
+		/* Word 4 */
+		wqe->fcp_icmd.rsrvd4 = 0;
+
+		/* Word 7 */
+		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
+		bf_set(wqe_pu, &wqe->generic.wqe_com, 0);
+
+		/* Word 10 */
+		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		if (phba->cfg_nvme_oas)
+			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+
+		/* Word 11 */
+		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
+
+		atomic_inc(&phba->fc4NvmeControlRequests);
+	}
+	/*
+	 * Finish initializing those WQE fields that are independent
+	 * of the nvme_cmnd request_buffer
+	 */
+
+	/* Word 6 */
+	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
+	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+
+	/* Word 7 */
+	/* Preserve Class data in the ndlp. */
+	bf_set(wqe_class, &wqe->generic.wqe_com,
+	       (pnode->nlp_fcp_info & 0x0f));
+
+	/* Word 8 */
+	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+
+	/* Word 9 */
+	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+	/* Word 11 */
+	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+	pwqeq->vport = vport;
+	return 0;
+}
+
+
+/**
+ * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
+ * @vport: Pointer to the vport issuing the IO
+ * @lpfc_ncmd: Pointer to the driver's NVME IO buffer
+ *
+ * This routine walks the scatter-gather list supplied by the nvme
+ * transport and formats one driver SGE per data segment, then sets
+ * the total transfer length in word 4 of the WQE.
+ *
+ * Return value :
+ *   0 - Success
+ *   1 - Invalid sg list, or segment count exceeds the driver maximum
+ **/
+static int
+lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
+		      struct lpfc_nvme_buf *lpfc_ncmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
+	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
+	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
+	struct scatterlist *data_sg;
+	struct sli4_sge *first_data_sgl;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+	int nseg, i;
+
+	/* Set up the command and response SGEs for this IO. */
+	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.
+	 */
+	if (nCmd->sg_cnt) {
+		/*
+		 * Jump over the cmd and rsp SGEs.  The fix routine
+		 * has already adjusted for this.
+		 */
+		sgl += 2;
+
+		first_data_sgl = sgl;
+		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
+		if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6058 Too many sg segments from "
+					"NVME Transport.  Max %d, "
+					"nvmeIO sg_cnt %d\n",
+					phba->cfg_nvme_seg_cnt,
+					lpfc_ncmd->seg_cnt);
+			lpfc_ncmd->seg_cnt = 0;
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single nvme command.  Just run through the seg_cnt and format
+		 * the sge's.
+		 */
+		nseg = nCmd->sg_cnt;
+		data_sg = nCmd->first_sgl;
+		for (i = 0; i < nseg; i++) {
+			if (data_sg == NULL) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+						"6059 dptr err %d, nseg %d\n",
+						i, nseg);
+				lpfc_ncmd->seg_cnt = 0;
+				return 1;
+			}
+			physaddr = data_sg->dma_address;
+			dma_len = data_sg->length;
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
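+			/*
+			 * word2 carries the last/offset/type bit fields:
+			 * convert to host order, set the fields, then
+			 * convert back to little endian for the port.
+			 */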
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			if ((num_bde + 1) == nseg)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len = cpu_to_le32(dma_len);
+
+			dma_offset += dma_len;
+			data_sg = sg_next(data_sg);
+			sgl++;
+		}
+	} else {
+		/* For this clause to be valid, the payload_length
+		 * and sg_cnt must be zero.
+		 */
+		if (nCmd->payload_length != 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6063 NVME DMA Prep Err: sg_cnt %d "
+					"payload_length x%x\n",
+					nCmd->sg_cnt, nCmd->payload_length);
+			return 1;
+		}
+	}
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of WQE here
+	 */
+	wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
+	return 0;
+}
+
+/**
+ * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport receiving the IO
+ * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from the nvme transport to the driver
+ *
+ * The driver registers this routine as its io request handler.  This
+ * routine prepares an fcp WQE from the @pnvme_fcreq data structure and
+ * issues it to the rport indicated by @pnvme_rport.
+ *
+ * Return value :
+ *   0 - Success
+ *   -ENODEV - The rport's node is missing or not ready for IO
+ *   -EBUSY - The node queue depth was exceeded or no buffer was available
+ *   -ENOMEM - DMA preparation of the request failed
+ *   nonzero - WQE submission to the work queue failed
+ **/
+static int
+lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
+			struct nvme_fc_remote_port *pnvme_rport,
+			void *hw_queue_handle,
+			struct nvmefc_fcp_req *pnvme_fcreq)
+{
+	int ret = 0;
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_nvme_buf *lpfc_ncmd;
+	struct lpfc_nvme_rport *rport;
+	struct lpfc_nvme_qhandle *lpfc_queue_info;
+	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint64_t start = 0;
+#endif
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	vport = lport->vport;
+	phba = vport->phba;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on)
+		start = ktime_get_ns();
+#endif
+	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
+
+	/*
+	 * Catch race where our node has transitioned, but the
+	 * transport is still transitioning.
+	 */
+	ndlp = rport->ndlp;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+				 "6053 rport %p, ndlp %p, DID x%06x "
+				 "ndlp not ready.\n",
+				 rport, ndlp, pnvme_rport->port_id);
+
+		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+		if (!ndlp) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+					 "6066 Missing node for DID %x\n",
+					 pnvme_rport->port_id);
+			ret = -ENODEV;
+			goto out_fail;
+		}
+	}
+
+	/* The remote node has to be a mapped target or it's an error. */
+	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+				 "6036 rport %p, DID x%06x not ready for "
+				 "IO. State x%x, Type x%x\n",
+				 rport, pnvme_rport->port_id,
+				 ndlp->nlp_state, ndlp->nlp_type);
+		ret = -ENODEV;
+		goto out_fail;
+
+	}
+
+	/* The node is shared with FCP IO, make sure the IO pending count does
+	 * not exceed the programmed depth.
+	 */
+	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+		ret = -EBUSY;
+		goto out_fail;
+	}
+
+	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+	if (lpfc_ncmd == NULL) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+				 "6065 driver's buffer pool is empty, "
+				 "IO failed\n");
+		ret = -EBUSY;
+		goto out_fail;
+	}
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on) {
+		lpfc_ncmd->ts_cmd_start = start;
+		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+	}
+#endif
+
+	/*
+	 * Store the data needed by the driver to issue, abort, and complete
+	 * an IO.
+	 * Do not let the IO hang out forever.  There is no midlayer issuing
+	 * an abort so inform the FW of the maximum IO pending time.
+	 */
+	freqpriv->nvme_buf = lpfc_ncmd;
+	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
+	lpfc_ncmd->nrport = rport;
+	lpfc_ncmd->ndlp = ndlp;
+	lpfc_ncmd->start_time = jiffies;
+
+	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
+	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_free_nvme_buf;
+	}
+
+	atomic_inc(&ndlp->cmd_pending);
+
+	/*
+	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
+	 * This identifier was created in our hardware queue create callback
+	 * routine. The driver is now dependent on the IO queue steering from
+	 * the transport.  We are trusting the upper NVME layers know which
+	 * index to use and that they have affinitized a CPU to this hardware
+	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
+	 */
+	lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;
+
+	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
+			 lpfc_ncmd->cur_iocbq.sli4_xritag,
+			 lpfc_queue_info->index, ndlp->nlp_DID);
+
+	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
+	if (ret) {
+		atomic_dec(&ndlp->cmd_pending);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+				 "6113 FCP could not issue WQE err %x "
+				 "sid: x%x did: x%x oxid: x%x\n",
+				 ret, vport->fc_myDID, ndlp->nlp_DID,
+				 lpfc_ncmd->cur_iocbq.sli4_xritag);
+		goto out_free_nvme_buf;
+	}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on)
+		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
+
+	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+		lpfc_ncmd->cpu = smp_processor_id();
+		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
+			/* Check for admin queue */
+			if (lpfc_queue_info->qidx) {
+				lpfc_printf_vlog(vport,
+						 KERN_ERR, LOG_NVME_IOERR,
+						"6702 CPU Check cmd: "
+						"cpu %d wq %d\n",
+						lpfc_ncmd->cpu,
+						lpfc_queue_info->index);
+			}
+			lpfc_ncmd->cpu = lpfc_queue_info->index;
+		}
+		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
+			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
+	}
+#endif
+	return 0;
+
+ out_free_nvme_buf:
+	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
+		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
+			atomic_dec(&phba->fc4NvmeOutputRequests);
+		else
+			atomic_dec(&phba->fc4NvmeInputRequests);
+	} else
+		atomic_dec(&phba->fc4NvmeControlRequests);
+	lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ out_fail:
+	return ret;
+}
+
+/**
+ * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @abts_cmpl: Pointer to the WCQE completion of the abort request.
+ *
+ * This is the callback function for any NVME FCP IO that was aborted.
+ *
+ * Return value:
+ *   None
+ **/
+void
+lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			   struct lpfc_wcqe_complete *abts_cmpl)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+			"6145 ABORT_XRI_CN completing on rpi x%x "
+			"original iotag x%x, abort cmd iotag x%x "
+			"req_tag x%x, status x%x, hwstatus x%x\n",
+			cmdiocb->iocb.un.acxri.abortContextTag,
+			cmdiocb->iocb.un.acxri.abortIoTag,
+			cmdiocb->iotag,
+			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
+			bf_get(lpfc_wcqe_c_status, abts_cmpl),
+			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
+	lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
+ * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport that owns the IO being aborted
+ * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
+ * @pnvme_fcreq: The nvme transport IO request to abort
+ *
+ * The driver registers this routine as its nvme request io abort handler.
+ * This routine issues an fcp Abort WQE for the exchange that carries the
+ * @pnvme_fcreq data structure.  This routine is executed asynchronously -
+ * once the request is validated against the driver's outstanding IO
+ * state, the driver issues the abort request and returns.
+ * Return value:
+ *   None
+ **/
+static void
+lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
+		    struct nvme_fc_remote_port *pnvme_rport,
+		    void *hw_queue_handle,
+		    struct nvmefc_fcp_req *pnvme_fcreq)
+{
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	struct lpfc_nvme_rport *rport;
+	struct lpfc_nvme_buf *lpfc_nbuf;
+	struct lpfc_iocbq *abts_buf;
+	struct lpfc_iocbq *nvmereq_wqe;
+	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+	union lpfc_wqe *abts_wqe;
+	unsigned long flags;
+	int ret_val;
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+	vport = lport->vport;
+	phba = vport->phba;
+
+	/* Announce entry to the abort handler. */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+			 "6002 Abort Request to rport DID x%06x "
+			 "for nvme_fc_req %p\n",
+			 pnvme_rport->port_id,
+			 pnvme_fcreq);
+
+	/* If the hba is getting reset, this flag is set.  It is
+	 * cleared when the reset is complete and rings reestablished.
+	 */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	/* driver queued commands are in process of being flushed */
+	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6139 Driver in reset cleanup - flushing "
+				 "NVME Req now.  hba_flag x%x\n",
+				 phba->hba_flag);
+		return;
+	}
+
+	lpfc_nbuf = freqpriv->nvme_buf;
+	if (!lpfc_nbuf) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6140 NVME IO req has no matching lpfc nvme "
+				 "io buffer.  Skipping abort req.\n");
+		return;
+	} else if (!lpfc_nbuf->nvmeCmd) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6141 lpfc NVME IO req has no nvme_fcreq "
+				 "io buffer.  Skipping abort req.\n");
+		return;
+	}
+	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
+
+	/*
+	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
+	 * state must match the nvme_fcreq passed by the nvme
+	 * transport.  If they don't match, it is likely the driver
+	 * has already completed the NVME IO and the nvme transport
+	 * has not seen it yet.
+	 */
+	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6143 NVME req mismatch: "
+				 "lpfc_nbuf %p nvmeCmd %p, "
+				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
+				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
+				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
+		return;
+	}
+
+	/* Don't abort IOs no longer on the pending queue. */
+	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6142 NVME IO req %p not queued - skipping "
+				 "abort req xri x%x\n",
+				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
+		return;
+	}
+
+	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
+			 nvmereq_wqe->sli4_xritag,
+			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
+
+	/* Outstanding abort is in progress */
+	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6144 Outstanding NVME I/O Abort Request "
+				 "still pending on nvme_fcreq %p, "
+				 "lpfc_ncmd %p xri x%x\n",
+				 pnvme_fcreq, lpfc_nbuf,
+				 nvmereq_wqe->sli4_xritag);
+		return;
+	}
+
+	abts_buf = __lpfc_sli_get_iocbq(phba);
+	if (!abts_buf) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6136 No available abort wqes. Skipping "
+				 "Abts req for nvme_fcreq %p xri x%x\n",
+				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
+		return;
+	}
+
+	/* Ready - mark outstanding as aborted by driver. */
+	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+	/* Complete prepping the abort wqe and issue to the FW. */
+	abts_wqe = &abts_buf->wqe;
+
+	/* WQEs are reused.  Clear stale data and set key fields to
+	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+	 */
+	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
+
+	/* word 7 */
+	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
+	       nvmereq_wqe->iocb.ulpClass);
+
+	/* word 8 - tell the FW to abort the IO associated with this
+	 * outstanding exchange ID.
+	 */
+	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
+
+	/* word 9 - this is the iotag for the abts_wqe completion. */
+	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
+	       abts_buf->iotag);
+
+	/* word 10 */
+	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+	/* word 11 */
+	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abts_buf->iocb_flag |= LPFC_IO_NVME;
+	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
+	abts_buf->vport = vport;
+	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
+	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (ret_val) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+				 "6137 Failed abts issue_wqe with status x%x "
+				 "for nvme_fcreq %p.\n",
+				 ret_val, pnvme_fcreq);
+		lpfc_sli_release_iocbq(phba, abts_buf);
+		return;
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+			 "6138 Transport Abort NVME Request Issued for "
+			 "ox_id x%x on reqtag x%x\n",
+			 nvmereq_wqe->sli4_xritag,
+			 abts_buf->iotag);
+}
+
+/* Declare and initialize an instance of the FC NVME template. */
+static struct nvme_fc_port_template lpfc_nvme_template = {
+	/* initiator-based functions */
+	.localport_delete  = lpfc_nvme_localport_delete,
+	.remoteport_delete = lpfc_nvme_remoteport_delete,
+	.create_queue = lpfc_nvme_create_queue,
+	.delete_queue = lpfc_nvme_delete_queue,
+	.ls_req       = lpfc_nvme_ls_req,
+	.fcp_io       = lpfc_nvme_fcp_io_submit,
+	.ls_abort     = lpfc_nvme_ls_abort,
+	.fcp_abort    = lpfc_nvme_fcp_abort,
+
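+	/* max_hw_queues and max_sgl_segments are refreshed from the
+	 * hba configuration at localport creation time.
+	 */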
+	.max_hw_queues = 1,
+	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
+	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
+	.dma_boundary = 0xFFFFFFFF,
+
+	/* Sizes of additional private data for data structures.
+	 * No use for the LS request private area at this time.
+	 */
+	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
+	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
+	.lsrqst_priv_sz = 0,
+	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
+};
+
+/**
+ * lpfc_sli4_post_nvme_sgl_block - post a block of nvme buffer sgls to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @nblist: pointer to nvme buffer list.
+ * @count: number of nvme buffers on the list.
+ *
+ * This routine is invoked to post a block of @count nvme sgl pages from an
+ * NVME buffer list @nblist to the HBA using a non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+static int
+lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
+			      struct list_head *nblist,
+			      int count)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	dma_addr_t pdma_phys_bpl1;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* Calculate the requested length of the dma memory */
+	reqlen = count * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"6118 Block sgl registration required DMA "
+				"size (%d) greater than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6119 Failed to allocate mbox cmd memory\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+				LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6120 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory */
+	viraddr = mbox->sge_array->addr[0];
+
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
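+	/*
+	 * Build one page pair per buffer: page 0 always, page 1 only
+	 * when the buffer's SGL spills past SGL_PAGE_SIZE.  The first
+	 * buffer's xritag anchors the whole block.
+	 */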
+	pg_pairs = 0;
+	list_for_each_entry(lpfc_ncmd, nblist, list) {
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
+						SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
+		sgl_pg_pairs++;
+		pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	/* Perform endian conversion if necessary */
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"6125 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_nblist: pointer to the nvme buffer list.
+ * @sb_count: number of nvme buffers on @post_nblist.
+ *
+ * This routine walks a list of nvme buffers that was passed in. It attempts
+ * to construct blocks of nvme buffer sgls which contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post to the port.
+ * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
+ * the embedded SGL post mailbox command for posting. The @post_nblist passed
+ * in must be a local list, so no lock is needed while manipulating the list.
+ *
+ * Returns: the number of buffers successfully posted; 0 means total failure,
+ * -EINVAL an invalid @sb_count.
+ **/
+static int
+lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
+			     struct list_head *post_nblist, int sb_count)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+	int status, sgl_size;
+	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+	dma_addr_t pdma_phys_sgl1;
+	int last_xritag = NO_XRI;
+	int cur_xritag;
+	LIST_HEAD(prep_nblist);
+	LIST_HEAD(blck_nblist);
+	LIST_HEAD(nvme_nblist);
+
+	/* sanity check */
+	if (sb_count <= 0)
+		return -EINVAL;
+
+	sgl_size = phba->cfg_sg_dma_buf_size;
+
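+	/*
+	 * Accumulate buffers whose XRIs are consecutive.  A hole in
+	 * the XRI sequence, or LPFC_NEMBED_MBOX_SGL_CNT entries,
+	 * closes the current block and triggers a non-embedded SGL
+	 * block post.
+	 */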
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
+		list_del_init(&lpfc_ncmd->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_nblist, &blck_nblist);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_nblist, &blck_nblist);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posting++;
+		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+
+		/* end of repost sgl list condition for NVME buffers */
+		if (num_posting == sb_count) {
+			if (post_cnt == 0) {
+				/* last sgl posting block */
+				list_splice_init(&prep_nblist, &blck_nblist);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				/* last single sgl with non-contiguous xri */
+				if (sgl_size > SGL_PAGE_SIZE)
+					pdma_phys_sgl1 =
+						lpfc_ncmd->dma_phys_sgl +
+						SGL_PAGE_SIZE;
+				else
+					pdma_phys_sgl1 = 0;
+				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+				status = lpfc_sli4_post_sgl(phba,
+						lpfc_ncmd->dma_phys_sgl,
+						pdma_phys_sgl1, cur_xritag);
+				if (status) {
+					/* failure, put on abort nvme list */
+					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
+				} else {
+					/* success, put on NVME buffer list */
+					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+					lpfc_ncmd->status = IOSTAT_SUCCESS;
+					num_posted++;
+				}
+				/* success, put on NVME buffer sgl list */
+				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+			}
+		}
+
+		/* continue until a nembed page worth of sgls */
+		if (post_cnt == 0)
+			continue;
+
+		/* post block of NVME buffer list sgls */
+		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
+						       post_cnt);
+
+		/* don't reset xritag due to a hole in the xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset NVME buffer post count for next round of posting */
+		post_cnt = 0;
+
+		/* move the just-posted NVME buffers onto the NVME buffer sgl list */
+		while (!list_empty(&blck_nblist)) {
+			list_remove_head(&blck_nblist, lpfc_ncmd,
+					 struct lpfc_nvme_buf, list);
+			if (status) {
+				/* failure, put on abort nvme list */
+				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
+			} else {
+				/* success, put on NVME buffer list */
+				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+				lpfc_ncmd->status = IOSTAT_SUCCESS;
+				num_posted++;
+			}
+			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+		}
+	}
+	/* Push NVME buffers with sgl posted to the available list */
+	while (!list_empty(&nvme_nblist)) {
+		list_remove_head(&nvme_nblist, lpfc_ncmd,
+				 struct lpfc_nvme_buf, list);
+		lpfc_release_nvme_buf(phba, lpfc_ncmd);
+	}
+	return num_posted;
+}
+
+/**
+ * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of nvme buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
+ * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
+{
+	LIST_HEAD(post_nblist);
+	int num_posted, rc = 0;
+
+	/* move all NVME buffers that need reposting onto a local list */
+	spin_lock_irq(&phba->nvme_buf_list_get_lock);
+	spin_lock(&phba->nvme_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
+	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+	spin_unlock(&phba->nvme_buf_list_put_lock);
+	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+
+	/* post the list of nvme buffer sgls to port if available */
+	if (!list_empty(&post_nblist)) {
+		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
+						phba->sli4_hba.nvme_xri_cnt);
+		/* failed to post any nvme buffer, return error */
+		if (num_posted == 0)
+			rc = -EIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates nvme buffers for a device with the SLI-4 interface
+ * spec; each nvme buffer contains all the information needed to initiate
+ * an NVME I/O.  After allocating up to @num_to_alloc NVME buffers and
+ * putting them on a list, it posts them to the port using an SGL block post.
+ *
+ * Return codes:
+ *   int - number of nvme buffers that were allocated and posted.
+ *   0 = failure; less than @num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nvme_buf *lpfc_ncmd;
+	struct lpfc_iocbq *pwqeq;
+	union lpfc_wqe128 *wqe;
+	struct sli4_sge *sgl;
+	dma_addr_t pdma_phys_sgl;
+	uint16_t iotag, lxri = 0;
+	int bcnt, num_posted, sgl_size;
+	LIST_HEAD(prep_nblist);
+	LIST_HEAD(post_nblist);
+	LIST_HEAD(nvme_nblist);
+
+	sgl_size = phba->cfg_sg_dma_buf_size;
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
+		if (!lpfc_ncmd)
+			break;
+		/*
+		 * Get memory from the pci pool to map the virt space to
+		 * pci bus space for an I/O. The DMA buffer includes the
+		 * number of SGE's necessary to support the sg_tablesize.
+		 */
+		lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+						 GFP_KERNEL,
+						 &lpfc_ncmd->dma_handle);
+		if (!lpfc_ncmd->data) {
+			kfree(lpfc_ncmd);
+			break;
+		}
+		memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
+
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			break;
+		}
+		pwqeq = &(lpfc_ncmd->cur_iocbq);
+		wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
+
+		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, pwqeq);
+		if (iotag == 0) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6121 Failed to allocate IOTAG for"
+					" XRI:0x%x\n", lxri);
+			lpfc_sli4_free_xri(phba, lxri);
+			break;
+		}
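+		/* lxri is the driver's logical XRI index; the wire XRI
+		 * is looked up through the port-assigned xri_ids table.
+		 */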
+		pwqeq->sli4_lxritag = lxri;
+		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		pwqeq->iocb_flag |= LPFC_IO_NVME;
+		pwqeq->context1 = lpfc_ncmd;
+		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+
+		/* Initialize local short-hand pointers. */
+		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
+		sgl = lpfc_ncmd->nvme_sgl;
+		pdma_phys_sgl = lpfc_ncmd->dma_handle;
+		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
+
+		/* The Rsp SGE will be filled in when an IO is received
+		 * from the NVME layer to be sent.
+		 * The cmd will be embedded in the WQE, so a SKIP SGE is
+		 * needed in its place.
+		 */
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		/* Fill in word 3 / sgl_len during cmd submission */
+
+		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+
+		/* Word 7 */
+		bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
+		/* NVME upper layers will time things out, if needed */
+		bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);
+
+		/* Word 10 */
+		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+
+		/* add the nvme buffer to a post list */
+		list_add_tail(&lpfc_ncmd->list, &post_nblist);
+		spin_lock_irq(&phba->nvme_buf_list_get_lock);
+		phba->sli4_hba.nvme_xri_cnt++;
+		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+			"6114 Allocated %d out of %d requested new NVME "
+			"buffers\n", bcnt, num_to_alloc);
+
+	/* post the list of nvme buffer sgls to port if available */
+	if (!list_empty(&post_nblist))
+		num_posted = lpfc_post_nvme_sgl_list(phba,
+						     &post_nblist, bcnt);
+	else
+		num_posted = 0;
+
+	return num_posted;
+}
+
+/**
+ * lpfc_get_nvme_buf - Get an nvme buffer from lpfc_nvme_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: The node owning the new IO; buffers whose XRI has an active
+ *        RRQ against this node are skipped.
+ *
+ * This routine removes an nvme buffer from the head of the @phba
+ * lpfc_nvme_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_nvme_buf - Success
+ **/
+static struct lpfc_nvme_buf *
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+	unsigned long iflag = 0;
+	int found = 0;
+
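+	/*
+	 * Allocations are served from the get list.  When it runs dry,
+	 * splice in the put list (where released buffers accumulate)
+	 * and retry.  Buffers whose XRI still has an active RRQ for
+	 * this node are skipped.
+	 */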
+	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &phba->lpfc_nvme_buf_list_get, list) {
+		if (lpfc_test_rrq_active(phba, ndlp,
+					 lpfc_ncmd->cur_iocbq.sli4_lxritag))
+			continue;
+		list_del_init(&lpfc_ncmd->list);
+		found = 1;
+		break;
+	}
+	if (!found) {
+		spin_lock(&phba->nvme_buf_list_put_lock);
+		list_splice(&phba->lpfc_nvme_buf_list_put,
+			    &phba->lpfc_nvme_buf_list_get);
+		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+		spin_unlock(&phba->nvme_buf_list_put_lock);
+		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+					 &phba->lpfc_nvme_buf_list_get, list) {
+			if (lpfc_test_rrq_active(
+				phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
+				continue;
+			list_del_init(&lpfc_ncmd->list);
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+	if (!found)
+		return NULL;
+	return  lpfc_ncmd;
+}
+
+/**
+ * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_ncmd: The nvme buffer which is being released.
+ *
+ * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
+ * of the @phba lpfc_nvme_buf_list.  For SLI4, XRIs are tied to the nvme
+ * buffer and cannot be reused for at least RA_TOV amount of time if the IO
+ * was aborted.
+ **/
+static void
+lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
+{
+	unsigned long iflag = 0;
+
+	lpfc_ncmd->nonsg_phys = 0;
+	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6310 XB release deferred for "
+				"ox_id x%x on reqtag x%x\n",
+				lpfc_ncmd->cur_iocbq.sli4_xritag,
+				lpfc_ncmd->cur_iocbq.iotag);
+
+		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
+					iflag);
+		lpfc_ncmd->nvmeCmd = NULL;
+		list_add_tail(&lpfc_ncmd->list,
+			&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
+					iflag);
+	} else {
+		lpfc_ncmd->nvmeCmd = NULL;
+		lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
+		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+		list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+	}
+}
+
+/**
+ * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
+ * @vport: the lpfc_vport instance requesting a localport.
+ *
+ * This routine is invoked to create an nvme localport instance to bind
+ * to the nvme_fc_transport.  It is called once during driver load
+ * like lpfc_create_shost after all other services are initialized.
+ * It requires a vport, vpi, and wwns at call time.  Other localport
+ * parameters are modified as the driver's FCID and the Fabric WWN
+ * are established.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - no heap memory available
+ *      other values - from nvme registration upcall
+ **/
+int
+lpfc_nvme_create_localport(struct lpfc_vport *vport)
+{
+	int ret = 0;
+	struct lpfc_hba  *phba = vport->phba;
+	struct nvme_fc_port_info nfcp_info;
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
+	int len;
+
+	/* Initialize this localport instance.  The vport wwn usage ensures
+	 * that NPIV is accounted for.
+	 */
+	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
+	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
+	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
+
+	/* Limit to LPFC_MAX_NVME_SEG_CNT.
+	 * For now need + 1 to get around NVME transport logic.
+	 */
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
+				 "6300 Reducing sg segment cnt to %d\n",
+				 LPFC_MAX_NVME_SEG_CNT);
+		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+	} else {
+		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+	}
+	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
+	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
+
+	/* Only the localport pointer lives on the stack; the
+	 * registration call allocates heap memory for the localport
+	 * object as well as the private area.
+	 */
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
+					 &vport->phba->pcidev->dev, &localport);
+#else
+	ret = -ENOMEM;
+#endif
+	if (!ret) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
+				 "6005 Successfully registered local "
+				 "NVME port num %d, localP %p, private %p, "
+				 "sg_seg %d\n",
+				 localport->port_num, localport,
+				 localport->private,
+				 lpfc_nvme_template.max_sgl_segments);
+
+		/* Private is our lport size declared in the template. */
+		lport = (struct lpfc_nvme_lport *)localport->private;
+		vport->localport = localport;
+		lport->vport = vport;
+		vport->nvmei_support = 1;
+
+		/* Don't post more new bufs if repost already recovered
+		 * the nvme sgls.
+		 */
+		if (phba->sli4_hba.nvme_xri_cnt == 0) {
+			len  = lpfc_new_nvme_buf(vport,
+						 phba->sli4_hba.nvme_xri_max);
+			vport->phba->total_nvme_bufs += len;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
+ * @vport: pointer to the lpfc vport whose localport is to be destroyed.
+ *
+ * This routine is invoked to destroy all lports bound to the phba.
+ * The lport memory was allocated by the nvme fc transport and is
+ * released there.  This routine ensures all rports bound to the
+ * lport have been disconnected.
+ *
+ **/
+void
+lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
+	int ret;
+
+	if (vport->nvmei_support == 0)
+		return;
+
+	localport = vport->localport;
+	vport->localport = NULL;
+	lport = (struct lpfc_nvme_lport *)localport->private;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+			 "6011 Destroying NVME localport %p\n",
+			 localport);
+
+	/* lport's rport list is clear.  Unregister
+	 * lport and release resources.
+	 */
+	init_completion(&lport->lport_unreg_done);
+	ret = nvme_fc_unregister_localport(localport);
+	wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+	/* Regardless of the unregister upcall response, clear
+	 * nvmei_support.  All rports are unregistered and the
+	 * driver will clean up.
+	 */
+	vport->nvmei_support = 0;
+	if (ret == 0) {
+		lpfc_printf_vlog(vport,
+				 KERN_INFO, LOG_NVME_DISC,
+				 "6009 Unregistered lport Success\n");
+	} else {
+		lpfc_printf_vlog(vport,
+				 KERN_INFO, LOG_NVME_DISC,
+				 "6010 Unregistered lport "
+				 "Failed, status x%x\n",
+				 ret);
+	}
+#endif
+}
+
+void
+lpfc_nvme_update_localport(struct lpfc_vport *vport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
+
+	localport = vport->localport;
+	if (!localport) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
+				 "6710 Update NVME fail. No localport\n");
+		return;
+	}
+	lport = (struct lpfc_nvme_lport *)localport->private;
+	if (!lport) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
+				 "6171 Update NVME fail. localP %p, No lport\n",
+				 localport);
+		return;
+	}
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+			 "6012 Update NVME lport %p did x%x\n",
+			 localport, vport->fc_myDID);
+
+	localport->port_id = vport->fc_myDID;
+	if (localport->port_id == 0)
+		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
+	else
+		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6030 bound lport %p to DID x%06x\n",
+			 lport, localport->port_id);
+#endif
+}
+
+int
+lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	int ret = 0;
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_rport *rport;
+	struct nvme_fc_remote_port *remote_port;
+	struct nvme_fc_port_info rpinfo;
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
+			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_type);
+
+	localport = vport->localport;
+	if (!localport)
+		return 0;
+
+	lport = (struct lpfc_nvme_lport *)localport->private;
+
+	/* NVME rports are not preserved across devloss.
+	 * Just register this instance.  Note, rpinfo->dev_loss_tmo
+	 * is left at 0 to accept the transport defaults.  The
+	 * driver communicates port role capabilities consistent
+	 * with the PRLI response data.
+	 */
+	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
+	rpinfo.port_id = ndlp->nlp_DID;
+	if (ndlp->nlp_type & NLP_NVME_TARGET)
+		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
+	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
+
+	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
+	if (!ret) {
+		/* If the ndlp already has an nrport, this is just
+		 * a resume of the existing rport.  Else this is a
+		 * new rport.
+		 */
+		rport = remote_port->private;
+		if (ndlp->nrport == rport) {
+			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+					 LOG_NVME_DISC,
+					 "6014 Rebinding lport to "
+					 "rport wwpn 0x%llx, "
+					 "Data: x%x x%x x%x x%06x\n",
+					 remote_port->port_name,
+					 remote_port->port_id,
+					 remote_port->port_role,
+					 ndlp->nlp_type,
+					 ndlp->nlp_DID);
+		} else {
+			/* New rport. */
+			rport->remoteport = remote_port;
+			rport->lport = lport;
+			rport->ndlp = lpfc_nlp_get(ndlp);
+			if (!rport->ndlp)
+				return -1;
+			ndlp->nrport = rport;
+			lpfc_printf_vlog(vport, KERN_INFO,
+					 LOG_NVME_DISC | LOG_NODE,
+					 "6022 Binding new rport to "
+					 "lport %p Rport WWNN 0x%llx, "
+					 "Rport WWPN 0x%llx DID "
+					 "x%06x Role x%x\n",
+					 lport,
+					 rpinfo.node_name, rpinfo.port_name,
+					 rpinfo.port_id, rpinfo.port_role);
+		}
+	} else {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NODE,
+				 "6031 RemotePort Registration failed "
+				 "err: %d, DID x%06x\n",
+				 ret, ndlp->nlp_DID);
+	}
+
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
+ *
+ * There is no notion of Devloss or rport recovery from the current
+ * nvme_transport perspective.  Loss of an rport just means IO cannot
+ * be sent and recovery is completely up to the initiator.
+ * For now, the driver just unbinds the DID and port_role so that
+ * no further IO can be issued.  Changes are planned for later.
+ *
+ * Notes - the ndlp reference count is not decremented here
+ * since there is no nvme_transport api for devloss.  Node ref count
+ * is only adjusted in driver unload.
+ */
+void
+lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	int ret;
+	struct nvme_fc_local_port *localport;
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_rport *rport;
+	struct nvme_fc_remote_port *remoteport;
+
+	localport = vport->localport;
+
+	/* This is a fundamental error.  The localport is always
+	 * available until driver unload.  Just exit.
+	 */
+	if (!localport)
+		return;
+
+	lport = (struct lpfc_nvme_lport *)localport->private;
+	if (!lport)
+		goto input_err;
+
+	rport = ndlp->nrport;
+	if (!rport)
+		goto input_err;
+
+	remoteport = rport->remoteport;
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6033 Unreg nvme remoteport %p, portname x%llx, "
+			 "port_id x%06x, portstate x%x port type x%x\n",
+			 remoteport, remoteport->port_name,
+			 remoteport->port_id, remoteport->port_state,
+			 ndlp->nlp_type);
+
+	/* Sanity check ndlp type.  Only call for NVME ports. Don't
+	 * clear any rport state until the transport calls back.
+	 */
+	if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
+		init_completion(&rport->rport_unreg_done);
+
+		/* No concern about the role change on the nvme remoteport.
+		 * The transport will update it.
+		 */
+		ret = nvme_fc_unregister_remoteport(remoteport);
+		if (ret != 0) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+					 "6167 NVME unregister failed %d "
+					 "port_state x%x\n",
+					 ret, remoteport->port_state);
+		}
+
+	}
+	return;
+
+ input_err:
+#endif
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+			 "6168 State error: lport %p, rport %p FCID x%06x\n",
+			 vport->localport, ndlp->rport, ndlp->nlp_DID);
+}
+
+/**
+ * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvme xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri.
+ **/
+void
+lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+			   struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+	struct lpfc_nodelist *ndlp;
+	unsigned long iflag = 0;
+	int rrq_empty = 0;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+		return;
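+
+	/*
+	 * Locate the aborted XRI on the aborted-buffer list, clear its
+	 * busy state, arm an RRQ so the XRI is not immediately reused,
+	 * and return the buffer to the free pool.
+	 */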
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
+				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
+				 list) {
+		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
+			list_del_init(&lpfc_ncmd->list);
+			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+			lpfc_ncmd->status = IOSTAT_SUCCESS;
+			spin_unlock(
+				&phba->sli4_hba.abts_nvme_buf_list_lock);
+
+			rrq_empty = list_empty(&phba->active_rrq_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			ndlp = lpfc_ncmd->ndlp;
+			if (ndlp) {
+				lpfc_set_rrq_active(
+					phba, ndlp,
+					lpfc_ncmd->cur_iocbq.sli4_lxritag,
+					rxid, 1);
+				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+			}
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+					"6311 XRI Aborted xri x%x tag x%x "
+					"released\n",
+					xri, lpfc_ncmd->cur_iocbq.iotag);
+
+			lpfc_release_nvme_buf(phba, lpfc_ncmd);
+			if (rrq_empty)
+				lpfc_worker_wake_up(phba);
+			return;
+		}
+	}
+	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6312 XRI Aborted xri x%x not found\n", xri);
+
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvme.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvme.h
new file mode 100644
index 0000000..d192bb2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvme.h
@@ -0,0 +1,101 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ ********************************************************************/
+
+#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
+#define LPFC_NVME_WQSIZE		256
+
+#define LPFC_NVME_ERSP_LEN		0x20
+
+struct lpfc_nvme_qhandle {
+	uint32_t index;		/* WQ index to use */
+	uint32_t qidx;		/* queue index passed to create */
+	uint32_t cpu_id;	/* current cpu id at time of create */
+};
+
+/* Declare nvme-based local and remote port definitions. */
+struct lpfc_nvme_lport {
+	struct lpfc_vport *vport;
+	struct completion lport_unreg_done;
+	/* Add stats counters here */
+};
+
+struct lpfc_nvme_rport {
+	struct lpfc_nvme_lport *lport;
+	struct nvme_fc_remote_port *remoteport;
+	struct lpfc_nodelist *ndlp;
+	struct completion rport_unreg_done;
+};
+
+struct lpfc_nvme_buf {
+	struct list_head list;
+	struct nvmefc_fcp_req *nvmeCmd;
+	struct lpfc_nvme_rport *nrport;
+	struct lpfc_nodelist *ndlp;
+
+	uint32_t timeout;
+
+	uint16_t flags;  /* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY         0x1     /* SLI4 hba reported XB on WCQE cmpl */
+	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
+	uint16_t status;	/* From IOCB Word 7- ulpStatus */
+	uint16_t cpu;
+	uint16_t qidx;
+	uint16_t sqid;
+	uint32_t result;	/* From IOCB Word 4. */
+
+	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
+				 * dma_map_sg.  The driver needs this for calls
+				 * to dma_unmap_sg.
+				 */
+	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
+
+	/*
+	 * data and dma_handle are the kernel virtual and bus address of the
+	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+	 * gather bde list that supports the sg_tablesize value.
+	 */
+	void *data;
+	dma_addr_t dma_handle;
+
+	struct sli4_sge *nvme_sgl;
+	dma_addr_t dma_phys_sgl;
+
+	/* cur_iocbq has phys of the dma-able buffer.
+	 * Iotag is in here
+	 */
+	struct lpfc_iocbq cur_iocbq;
+
+	wait_queue_head_t *waitq;
+	unsigned long start_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint64_t ts_cmd_start;
+	uint64_t ts_last_cmd;
+	uint64_t ts_cmd_wqput;
+	uint64_t ts_isr_cmpl;
+	uint64_t ts_data_nvme;
+#endif
+};
+
+struct lpfc_nvme_fcpreq_priv {
+	struct lpfc_nvme_buf *nvme_buf;
+};
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvmet.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvmet.c
new file mode 100644
index 0000000..eacdcb9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -0,0 +1,2838 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ ********************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <../drivers/nvme/host/nvme.h>
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
+						 struct lpfc_nvmet_rcv_ctx *,
+						 dma_addr_t rspbuf,
+						 uint16_t rspsize);
+static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
+						  struct lpfc_nvmet_rcv_ctx *);
+static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
+					  struct lpfc_nvmet_rcv_ctx *,
+					  uint32_t, uint16_t);
+static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
+					    struct lpfc_nvmet_rcv_ctx *,
+					    uint32_t, uint16_t);
+static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
+					   struct lpfc_nvmet_rcv_ctx *,
+					   uint32_t, uint16_t);
+
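+/**
+ * lpfc_nvmet_defer_release - Defer release of a NVMET receive context
+ * @phba: Pointer to HBA context object.
+ * @ctxp: Pointer to the receive context to be released later.
+ *
+ * Marks the context with LPFC_NVMET_CTX_RLS and queues it on the
+ * lpfc_abts_nvmet_ctx_list so that the abort/XRI-aborted path performs
+ * the actual release. If the flag is already set, the context has
+ * already been queued and nothing more is done.
+ **/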
+void
+lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+	unsigned long iflag;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
+			ctxp->oxid, ctxp->flag);
+
+	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
+				       iflag);
+		return;
+	}
+	ctxp->flag |= LPFC_NVMET_CTX_RLS;
+	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+}
+
+/**
+ * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME LS commands.
+ * It frees the memory resources used for the NVME command.
+ **/
+static void
+lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+			  struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvmefc_tgt_ls_req *rsp;
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	uint32_t status, result;
+
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	result = wcqe->parameter;
+	ctxp = cmdwqe->context2;
+
+	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6410 NVMET LS cmpl state mismatch IO x%x: "
+				"%d %d\n",
+				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+	}
+
+	if (!phba->targetport)
+		goto out;
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
+	if (status)
+		atomic_inc(&tgtp->xmt_ls_rsp_error);
+	else
+		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+
+out:
+	rsp = &ctxp->ctx.ls_req;
+
+	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
+			 ctxp->oxid, status, result);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
+			status, result, ctxp->oxid);
+
+	lpfc_nlp_put(cmdwqe->context1);
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
+	lpfc_sli_release_iocbq(phba, cmdwqe);
+	rsp->done(rsp);
+	kfree(ctxp);
+}
+
+/**
+ * lpfc_nvmet_ctxbuf_post - Return a finished NVMET receive context for reuse
+ * @phba: Pointer to HBA context object.
+ * @ctx_buf: Context buffer being returned.
+ *
+ * Description: If a received command IU is waiting for a free context,
+ * the context is reused immediately to pass that command to the NVMET
+ * transport; otherwise the context buffer is returned to the CPU/MRQ
+ * context list it was received on.
+ *
+ * Notes: Takes the nvmet_io_wait and context list locks internally.
+ *
+ * Returns: None
+ **/
+void
+lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct fc_frame_header *fc_hdr;
+	struct rqb_dmabuf *nvmebuf;
+	struct lpfc_nvmet_ctx_info *infop;
+	uint32_t *payload;
+	uint32_t size, oxid, sid, rc;
+	int cpu;
+	unsigned long iflag;
+
+	if (ctxp->txrdy) {
+		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+			      ctxp->txrdy_phys);
+		ctxp->txrdy = NULL;
+		ctxp->txrdy_phys = 0;
+	}
+
+	if (ctxp->state == LPFC_NVMET_STE_FREE) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6411 NVMET free, already free IO x%x: %d %d\n",
+				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+	}
+	ctxp->state = LPFC_NVMET_STE_FREE;
+
+	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+	if (phba->sli4_hba.nvmet_io_wait_cnt) {
+		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
+				 nvmebuf, struct rqb_dmabuf,
+				 hbuf.list);
+		phba->sli4_hba.nvmet_io_wait_cnt--;
+		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+				       iflag);
+
+		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+		payload = (uint32_t *)(nvmebuf->dbuf.virt);
+		size = nvmebuf->bytes_recv;
+		sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+		ctxp->wqeq = NULL;
+		ctxp->txrdy = NULL;
+		ctxp->offset = 0;
+		ctxp->phba = phba;
+		ctxp->size = size;
+		ctxp->oxid = oxid;
+		ctxp->sid = sid;
+		ctxp->state = LPFC_NVMET_STE_RCV;
+		ctxp->entry_cnt = 1;
+		ctxp->flag = 0;
+		ctxp->ctxbuf = ctx_buf;
+		spin_lock_init(&ctxp->ctxlock);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		if (phba->ktime_on) {
+			ctxp->ts_cmd_nvme = ktime_get_ns();
+			ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
+			ctxp->ts_nvme_data = 0;
+			ctxp->ts_data_wqput = 0;
+			ctxp->ts_isr_data = 0;
+			ctxp->ts_data_nvme = 0;
+			ctxp->ts_nvme_status = 0;
+			ctxp->ts_status_wqput = 0;
+			ctxp->ts_isr_status = 0;
+			ctxp->ts_status_nvme = 0;
+		}
+#endif
+		atomic_inc(&tgtp->rcv_fcp_cmd_in);
+		/*
+		 * The calling sequence should be:
+		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
+		 * in the NVME command / FC header is stored.
+		 * A buffer has already been reposted for this IO, so just free
+		 * the nvmebuf.
+		 */
+		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+					  payload, size);
+
+		/* Process FCP command */
+		if (rc == 0) {
+			atomic_inc(&tgtp->rcv_fcp_cmd_out);
+			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+			return;
+		}
+
+		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+				ctxp->oxid, rc,
+				atomic_read(&tgtp->rcv_fcp_cmd_in),
+				atomic_read(&tgtp->rcv_fcp_cmd_out),
+				atomic_read(&tgtp->xmt_fcp_release));
+
+		lpfc_nvmet_defer_release(phba, ctxp);
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+		return;
+	}
+	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+
+	/*
+	 * Use the CPU context list, from the MRQ the IO was received on
+	 * (ctxp->idx), to save the context structure.
+	 */
+	cpu = smp_processor_id();
+	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
+	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
+	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+	infop->nvmet_ctx_list_cnt++;
+	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
+#endif
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+static void
+lpfc_nvmet_ktime(struct lpfc_hba *phba,
+		 struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+	uint64_t seg1, seg2, seg3, seg4, seg5;
+	uint64_t seg6, seg7, seg8, seg9, seg10;
+
+	if (!phba->ktime_on)
+		return;
+
+	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
+	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
+	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
+	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
+	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
+		return;
+
+	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
+		return;
+	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
+		return;
+	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
+		return;
+	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
+		return;
+	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
+		return;
+	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
+		return;
+	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
+		return;
+	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
+		return;
+	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
+		return;
+	/*
+	 * Segment 1 - Time from FCP command received by MSI-X ISR
+	 * to FCP command is passed to NVME Layer.
+	 * Segment 2 - Time from FCP command payload handed
+	 * off to NVME Layer to Driver receives a Command op
+	 * from NVME Layer.
+	 * Segment 3 - Time from Driver receives a Command op
+	 * from NVME Layer to Command is put on WQ.
+	 * Segment 4 - Time from Driver WQ put is done
+	 * to MSI-X ISR for Command cmpl.
+	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
+	 * Command cmpl is passed to NVME Layer.
+	 * Segment 6 - Time from Command cmpl is passed to NVME
+	 * Layer to Driver receives a RSP op from NVME Layer.
+	 * Segment 7 - Time from Driver receives a RSP op from
+	 * NVME Layer to WQ put is done on TRSP FCP Status.
+	 * Segment 8 - Time from Driver WQ put is done on TRSP
+	 * FCP Status to MSI-X ISR for TRSP cmpl.
+	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
+	 * TRSP cmpl is passed to NVME Layer.
+	 * Segment 10 - Time from FCP command received by
+	 * MSI-X ISR to command is completed on wire.
+	 * (Segments 1 thru 8) for READDATA / WRITEDATA
+	 * (Segments 1 thru 4) for READDATA_RSP
+	 */
+	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
+	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
+	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
+		seg1 - seg2;
+	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
+		seg1 - seg2 - seg3;
+	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
+		seg1 - seg2 - seg3 - seg4;
+
+	/* For auto rsp commands seg6 thru seg9 will be 0 */
+	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
+		seg6 = (ctxp->ts_nvme_status -
+			ctxp->ts_isr_cmd) -
+			seg1 - seg2 - seg3 - seg4 - seg5;
+		seg7 = (ctxp->ts_status_wqput -
+			ctxp->ts_isr_cmd) -
+			seg1 - seg2 - seg3 -
+			seg4 - seg5 - seg6;
+		seg8 = (ctxp->ts_isr_status -
+			ctxp->ts_isr_cmd) -
+			seg1 - seg2 - seg3 - seg4 -
+			seg5 - seg6 - seg7;
+		seg9 = (ctxp->ts_status_nvme -
+			ctxp->ts_isr_cmd) -
+			seg1 - seg2 - seg3 - seg4 -
+			seg5 - seg6 - seg7 - seg8;
+		seg10 = (ctxp->ts_isr_status -
+			ctxp->ts_isr_cmd);
+	} else {
+		seg6 =  0;
+		seg7 =  0;
+		seg8 =  0;
+		seg9 =  0;
+		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
+	}
+
+	phba->ktime_seg1_total += seg1;
+	if (seg1 < phba->ktime_seg1_min)
+		phba->ktime_seg1_min = seg1;
+	else if (seg1 > phba->ktime_seg1_max)
+		phba->ktime_seg1_max = seg1;
+
+	phba->ktime_seg2_total += seg2;
+	if (seg2 < phba->ktime_seg2_min)
+		phba->ktime_seg2_min = seg2;
+	else if (seg2 > phba->ktime_seg2_max)
+		phba->ktime_seg2_max = seg2;
+
+	phba->ktime_seg3_total += seg3;
+	if (seg3 < phba->ktime_seg3_min)
+		phba->ktime_seg3_min = seg3;
+	else if (seg3 > phba->ktime_seg3_max)
+		phba->ktime_seg3_max = seg3;
+
+	phba->ktime_seg4_total += seg4;
+	if (seg4 < phba->ktime_seg4_min)
+		phba->ktime_seg4_min = seg4;
+	else if (seg4 > phba->ktime_seg4_max)
+		phba->ktime_seg4_max = seg4;
+
+	phba->ktime_seg5_total += seg5;
+	if (seg5 < phba->ktime_seg5_min)
+		phba->ktime_seg5_min = seg5;
+	else if (seg5 > phba->ktime_seg5_max)
+		phba->ktime_seg5_max = seg5;
+
+	phba->ktime_data_samples++;
+	if (!seg6)
+		goto out;
+
+	phba->ktime_seg6_total += seg6;
+	if (seg6 < phba->ktime_seg6_min)
+		phba->ktime_seg6_min = seg6;
+	else if (seg6 > phba->ktime_seg6_max)
+		phba->ktime_seg6_max = seg6;
+
+	phba->ktime_seg7_total += seg7;
+	if (seg7 < phba->ktime_seg7_min)
+		phba->ktime_seg7_min = seg7;
+	else if (seg7 > phba->ktime_seg7_max)
+		phba->ktime_seg7_max = seg7;
+
+	phba->ktime_seg8_total += seg8;
+	if (seg8 < phba->ktime_seg8_min)
+		phba->ktime_seg8_min = seg8;
+	else if (seg8 > phba->ktime_seg8_max)
+		phba->ktime_seg8_max = seg8;
+
+	phba->ktime_seg9_total += seg9;
+	if (seg9 < phba->ktime_seg9_min)
+		phba->ktime_seg9_min = seg9;
+	else if (seg9 > phba->ktime_seg9_max)
+		phba->ktime_seg9_max = seg9;
+out:
+	phba->ktime_seg10_total += seg10;
+	if (seg10 < phba->ktime_seg10_min)
+		phba->ktime_seg10_min = seg10;
+	else if (seg10 > phba->ktime_seg10_max)
+		phba->ktime_seg10_max = seg10;
+	phba->ktime_status_samples++;
+}
+#endif
+
+/**
+ * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME FCP commands.
+ * It frees the memory resources used for the NVME command.
+ **/
+static void
+lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+			  struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvmefc_tgt_fcp_req *rsp;
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	uint32_t status, result, op, start_clean;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t id;
+#endif
+
+	ctxp = cmdwqe->context2;
+	ctxp->flag &= ~LPFC_NVMET_IO_INP;
+
+	rsp = &ctxp->ctx.fcp_req;
+	op = rsp->op;
+
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	result = wcqe->parameter;
+
+	if (phba->targetport)
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	else
+		tgtp = NULL;
+
+	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
+			 ctxp->oxid, op, status);
+
+	if (status) {
+		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
+		rsp->transferred_length = 0;
+		if (tgtp)
+			atomic_inc(&tgtp->xmt_fcp_rsp_error);
+
+		/* pick up SLI4 exchange busy condition */
+		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+			ctxp->flag |= LPFC_NVMET_XBUSY;
+
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
+					ctxp->oxid, status, result);
+		} else {
+			ctxp->flag &= ~LPFC_NVMET_XBUSY;
+		}
+
+	} else {
+		rsp->fcp_error = NVME_SC_SUCCESS;
+		if (op == NVMET_FCOP_RSP)
+			rsp->transferred_length = rsp->rsplen;
+		else
+			rsp->transferred_length = rsp->transfer_length;
+		if (tgtp)
+			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
+	}
+
+	if ((op == NVMET_FCOP_READDATA_RSP) ||
+	    (op == NVMET_FCOP_RSP)) {
+		/* Sanity check */
+		ctxp->state = LPFC_NVMET_STE_DONE;
+		ctxp->entry_cnt++;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		if (phba->ktime_on) {
+			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
+				ctxp->ts_isr_data =
+					cmdwqe->isr_timestamp;
+				ctxp->ts_data_nvme =
+					ktime_get_ns();
+				ctxp->ts_nvme_status =
+					ctxp->ts_data_nvme;
+				ctxp->ts_status_wqput =
+					ctxp->ts_data_nvme;
+				ctxp->ts_isr_status =
+					ctxp->ts_data_nvme;
+				ctxp->ts_status_nvme =
+					ctxp->ts_data_nvme;
+			} else {
+				ctxp->ts_isr_status =
+					cmdwqe->isr_timestamp;
+				ctxp->ts_status_nvme =
+					ktime_get_ns();
+			}
+		}
+		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+			id = smp_processor_id();
+			if (ctxp->cpu != id)
+				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+						"6703 CPU Check cmpl: "
+						"cpu %d expect %d\n",
+						id, ctxp->cpu);
+			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
+				phba->cpucheck_cmpl_io[id]++;
+		}
+#endif
+		rsp->done(rsp);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		if (phba->ktime_on)
+			lpfc_nvmet_ktime(phba, ctxp);
+#endif
+		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
+	} else {
+		ctxp->entry_cnt++;
+		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
+		memset(((char *)cmdwqe) + start_clean, 0,
+		       (sizeof(struct lpfc_iocbq) - start_clean));
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		if (phba->ktime_on) {
+			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
+			ctxp->ts_data_nvme = ktime_get_ns();
+		}
+		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+			id = smp_processor_id();
+			if (ctxp->cpu != id)
+				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+						"6704 CPU Check cmdcmpl: "
+						"cpu %d expect %d\n",
+						id, ctxp->cpu);
+			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
+				phba->cpucheck_ccmpl_io[id]++;
+		}
+#endif
+		rsp->done(rsp);
+	}
+}
+
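+/**
+ * lpfc_nvmet_xmt_ls_rsp - Transmit a NVME LS response
+ * @tgtport: Pointer to the nvmet-fc target port.
+ * @rsp: Pointer to the transport LS response request.
+ *
+ * This is the nvmet-fc transport's xmt_ls_rsp entry point. It builds an
+ * XMIT_SEQUENCE WQE carrying the LS response payload and issues it on
+ * the ELS ring. On failure the receive buffer is freed and an abort is
+ * issued for the exchange.
+ *
+ * Returns 0 on success, -ENOMEM or -ENXIO on failure.
+ **/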
+static int
+lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
+		      struct nvmefc_tgt_ls_req *rsp)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
+	struct lpfc_hba *phba = ctxp->phba;
+	struct hbq_dmabuf *nvmebuf =
+		(struct hbq_dmabuf *)ctxp->rqb_buffer;
+	struct lpfc_iocbq *nvmewqeq;
+	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
+	struct lpfc_dmabuf dmabuf;
+	struct ulp_bde64 bpl;
+	int rc;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
+
+	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
+	    (ctxp->entry_cnt != 1)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6412 NVMET LS rsp state mismatch "
+				"oxid x%x: %d %d\n",
+				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+	}
+	ctxp->state = LPFC_NVMET_STE_LS_RSP;
+	ctxp->entry_cnt++;
+
+	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
+					  rsp->rsplen);
+	if (nvmewqeq == NULL) {
+		atomic_inc(&nvmep->xmt_ls_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6150 LS Drop IO x%x: Prep\n",
+				ctxp->oxid);
+		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+		atomic_inc(&nvmep->xmt_ls_abort);
+		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
+						ctxp->sid, ctxp->oxid);
+		return -ENOMEM;
+	}
+
+	/* Save numBdes for bpl2sgl */
+	nvmewqeq->rsvd2 = 1;
+	nvmewqeq->hba_wqidx = 0;
+	nvmewqeq->context3 = &dmabuf;
+	dmabuf.virt = &bpl;
+	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
+	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
+	bpl.tus.f.bdeSize = rsp->rsplen;
+	bpl.tus.f.bdeFlags = 0;
+	bpl.tus.w = le32_to_cpu(bpl.tus.w);
+
+	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
+	nvmewqeq->iocb_cmpl = NULL;
+	nvmewqeq->context2 = ctxp;
+
+	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
+			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
+
+	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
+	if (rc == WQE_SUCCESS) {
+		/*
+		 * Okay to repost buffer here, but wait till cmpl
+		 * before freeing ctxp and iocbq.
+		 */
+		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+		ctxp->rqb_buffer = NULL;
+		atomic_inc(&nvmep->xmt_ls_rsp);
+		return 0;
+	}
+	/* Give back resources */
+	atomic_inc(&nvmep->xmt_ls_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"6151 LS Drop IO x%x: Issue %d\n",
+			ctxp->oxid, rc);
+
+	lpfc_nlp_put(nvmewqeq->context1);
+
+	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+	atomic_inc(&nvmep->xmt_ls_abort);
+	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+	return -ENXIO;
+}
+
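+/**
+ * lpfc_nvmet_xmt_fcp_op - Issue a NVMET FCP data or response operation
+ * @tgtport: Pointer to the nvmet-fc target port.
+ * @rsp: Pointer to the transport FCP request describing the operation.
+ *
+ * This is the nvmet-fc transport's fcp_op entry point. It prepares a WQE
+ * for the requested operation (rsp->op) and issues it on the FCP WQ
+ * selected by rsp->hwqid. IOs on an aborted exchange are dropped.
+ *
+ * Returns 0 on success, -ENXIO or -EBUSY on failure.
+ **/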
+static int
+lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
+		      struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_hba *phba = ctxp->phba;
+	struct lpfc_iocbq *nvmewqeq;
+	int rc;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on) {
+		if (rsp->op == NVMET_FCOP_RSP)
+			ctxp->ts_nvme_status = ktime_get_ns();
+		else
+			ctxp->ts_nvme_data = ktime_get_ns();
+	}
+	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+		int id = smp_processor_id();
+		ctxp->cpu = id;
+		if (id < LPFC_CHECK_CPU_CNT)
+			phba->cpucheck_xmt_io[id]++;
+		if (rsp->hwqid != id) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6705 CPU Check OP: "
+					"cpu %d expect %d\n",
+					id, rsp->hwqid);
+			ctxp->cpu = rsp->hwqid;
+		}
+	}
+#endif
+
+	/* Sanity check */
+	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
+	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
+		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6102 IO xri x%x aborted\n",
+				ctxp->oxid);
+		rc = -ENXIO;
+		goto aerr;
+	}
+
+	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
+	if (nvmewqeq == NULL) {
+		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6152 FCP Drop IO x%x: Prep\n",
+				ctxp->oxid);
+		rc = -ENXIO;
+		goto aerr;
+	}
+
+	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
+	nvmewqeq->iocb_cmpl = NULL;
+	nvmewqeq->context2 = ctxp;
+	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
+	ctxp->wqeq->hba_wqidx = rsp->hwqid;
+
+	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
+			 ctxp->oxid, rsp->op, rsp->rsplen);
+
+	ctxp->flag |= LPFC_NVMET_IO_INP;
+	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+	if (rc == WQE_SUCCESS) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+		if (!phba->ktime_on)
+			return 0;
+		if (rsp->op == NVMET_FCOP_RSP)
+			ctxp->ts_status_wqput = ktime_get_ns();
+		else
+			ctxp->ts_data_wqput = ktime_get_ns();
+#endif
+		return 0;
+	}
+
+	/* Give back resources */
+	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"6153 FCP Drop IO x%x: Issue: %d\n",
+			ctxp->oxid, rc);
+
+	ctxp->wqeq->hba_wqidx = 0;
+	nvmewqeq->context2 = NULL;
+	nvmewqeq->context3 = NULL;
+	rc = -EBUSY;
+aerr:
+	return rc;
+}
+
+static void
+lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+	struct lpfc_nvmet_tgtport *tport = targetport->private;
+
+	/* release any threads waiting for the unreg to complete */
+	complete(&tport->tport_unreg_done);
+}
+
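+/**
+ * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding NVMET FCP operation
+ * @tgtport: Pointer to the nvmet-fc target port.
+ * @req: Pointer to the transport FCP request to abort.
+ *
+ * This is the nvmet-fc transport's fcp_abort entry point. Unless the
+ * firmware is already aborting the exchange (LPFC_NVMET_XBUSY), it marks
+ * the context with LPFC_NVMET_ABORT_OP and issues an unsolicited abort
+ * if no IO WQEs have been issued yet, or a solicited abort otherwise.
+ **/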
+static void
+lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+			 struct nvmefc_tgt_fcp_req *req)
+{
+	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_hba *phba = ctxp->phba;
+	unsigned long flags;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+			ctxp->oxid, ctxp->flag, ctxp->state);
+
+	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
+			 ctxp->oxid, ctxp->flag, ctxp->state);
+
+	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
+
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
+
+	/* Since iaab/iaar are NOT set, we need to check
+	 * if the firmware is in the process of aborting this IO
+	 */
+	if (ctxp->flag & LPFC_NVMET_XBUSY) {
+		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+		return;
+	}
+	ctxp->flag |= LPFC_NVMET_ABORT_OP;
+
+	/* A state of LPFC_NVMET_STE_RCV means we have just received
+	 * the NVME command and have not yet started processing it
+	 * (no IO WQEs have been issued on this exchange yet).
+	 */
+	if (ctxp->state == LPFC_NVMET_STE_RCV)
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+						 ctxp->oxid);
+	else
+		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+					       ctxp->oxid);
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+}
+
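+/**
+ * lpfc_nvmet_xmt_fcp_release - Release a NVMET receive context
+ * @tgtport: Pointer to the nvmet-fc target port.
+ * @rsp: Pointer to the transport FCP request being released.
+ *
+ * This is the nvmet-fc transport's fcp_req_release entry point. It
+ * recycles the context buffer unless an abort is still in progress, in
+ * which case the release is deferred to the abort path.
+ **/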
+static void
+lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
+			   struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct lpfc_hba *phba = ctxp->phba;
+	unsigned long flags;
+	bool aborting = false;
+
+	if (ctxp->state != LPFC_NVMET_STE_DONE &&
+	    ctxp->state != LPFC_NVMET_STE_ABORT) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6413 NVMET release bad state %d %d oxid x%x\n",
+				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+	}
+
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
+	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
+	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
+		aborting = true;
+		/* let the abort path do the real release */
+		lpfc_nvmet_defer_release(phba, ctxp);
+	}
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
+	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
+			 ctxp->oxid, ctxp->state, aborting);
+
+	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
+
+	if (aborting)
+		return;
+
+	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+}
+
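+/**
+ * lpfc_nvmet_defer_rcv - Repost the RQ buffer of a deferred FCP command
+ * @tgtport: Pointer to the nvmet-fc target port.
+ * @rsp: Pointer to the transport FCP request that was deferred.
+ *
+ * This is the nvmet-fc transport's defer_rcv entry point, called once
+ * the transport is done with the command IU buffer of a command whose
+ * receive processing was deferred, so the buffer can be reposted to its
+ * receive queue.
+ **/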
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+		     struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct lpfc_hba *phba = ctxp->phba;
+
+	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+			 ctxp->oxid, ctxp->size, smp_processor_id());
+
+	tgtp = phba->targetport->private;
+	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+	lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
+static struct nvmet_fc_target_template lpfc_tgttemplate = {
+	.targetport_delete = lpfc_nvmet_targetport_delete,
+	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
+	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
+	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
+	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+	.defer_rcv	= lpfc_nvmet_defer_rcv,
+
+	.max_hw_queues  = 1,
+	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
+	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
+	.dma_boundary = 0xFFFFFFFF,
+
+	/* optional features */
+	.target_features = 0,
+	/* sizes of additional private data for data structures */
+	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
+};
+
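+/**
+ * __lpfc_nvmet_clean_io_for_cpu - Free all receive contexts on one list
+ * @phba: Pointer to HBA context object.
+ * @infop: Pointer to the per-CPU/MRQ context list to clean up.
+ *
+ * Walks the given context list, returning each context's sglq to the
+ * NVMET sgl list and freeing its iocbq and receive context memory.
+ **/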
+static void
+__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
+		struct lpfc_nvmet_ctx_info *infop)
+{
+	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
+	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+				&infop->nvmet_ctx_list, list) {
+		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		list_del_init(&ctx_buf->list);
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+
+		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
+		ctx_buf->sglq->state = SGL_FREED;
+		ctx_buf->sglq->ndlp = NULL;
+
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_add_tail(&ctx_buf->sglq->list,
+				&phba->sli4_hba.lpfc_nvmet_sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+
+		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+		kfree(ctx_buf->context);
+	}
+	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
+}
+
+static void
+lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
+{
+	struct lpfc_nvmet_ctx_info *infop;
+	int i, j;
+
+	/* The first context list, MRQ 0 CPU 0 */
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	if (!infop)
+		return;
+
+	/* Cycle through the entire CPU context list for every MRQ */
+	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
+			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
+			infop++; /* next */
+		}
+	}
+	kfree(phba->sli4_hba.nvmet_ctx_info);
+	phba->sli4_hba.nvmet_ctx_info = NULL;
+}
+
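+/**
+ * lpfc_nvmet_setup_io_context - Allocate per-XRI NVMET receive resources
+ * @phba: Pointer to HBA context object.
+ *
+ * Allocates the per-CPU/per-MRQ context list array, then for every NVMET
+ * XRI allocates a context buffer, receive context, iocbq and sglq, and
+ * distributes the context buffers evenly across the MRQ context lists.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ **/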
+static int
+lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
+{
+	struct lpfc_nvmet_ctxbuf *ctx_buf;
+	struct lpfc_iocbq *nvmewqe;
+	union lpfc_wqe128 *wqe;
+	struct lpfc_nvmet_ctx_info *last_infop;
+	struct lpfc_nvmet_ctx_info *infop;
+	int i, j, idx;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+			"6403 Allocate NVMET resources for %d XRIs\n",
+			phba->sli4_hba.nvmet_xri_cnt);
+
+	phba->sli4_hba.nvmet_ctx_info = kcalloc(
+		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
+		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
+	if (!phba->sli4_hba.nvmet_ctx_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6419 Failed allocate memory for "
+				"nvmet context lists\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * Assuming X CPUs in the system, and Y MRQs, allocate some
+	 * lpfc_nvmet_ctx_info structures as follows:
+	 *
+	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
+	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
+	 * ...
+	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
+	 *
+	 * Each line represents an MRQ "silo" containing an entry for
+	 * every CPU.
+	 *
+	 * MRQ X is initially assumed to be associated with CPU X, thus
+	 * contexts are initially distributed across all MRQs using
+	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
+	 * freed, they are freed to the MRQ silo based on the CPU number
+	 * of the IO completion. Thus a context that was allocated for MRQ A
+	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
+	 */
+	infop = phba->sli4_hba.nvmet_ctx_info;
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
+			spin_lock_init(&infop->nvmet_ctx_list_lock);
+			infop->nvmet_ctx_list_cnt = 0;
+			infop++;
+		}
+	}
+
+	/*
+	 * Setup the next CPU context info ptr for each MRQ.
+	 * MRQ 0 will cycle thru CPUs 0 - X separately from
+	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
+	 */
+	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+		last_infop = lpfc_get_ctx_list(phba, 0, j);
+		for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
+			infop = lpfc_get_ctx_list(phba, i, j);
+			infop->nvmet_ctx_next_cpu = last_infop;
+			last_infop = infop;
+		}
+	}
+
+	/* For all nvmet xris, allocate resources needed to process a
+	 * received command on a per xri basis.
+	 */
+	idx = 0;
+	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
+		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
+		if (!ctx_buf) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6404 Ran out of memory for NVMET\n");
+			return -ENOMEM;
+		}
+
+		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
+					   GFP_KERNEL);
+		if (!ctx_buf->context) {
+			kfree(ctx_buf);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6405 Ran out of NVMET "
+					"context memory\n");
+			return -ENOMEM;
+		}
+		ctx_buf->context->ctxbuf = ctx_buf;
+		ctx_buf->context->state = LPFC_NVMET_STE_FREE;
+
+		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
+		if (!ctx_buf->iocbq) {
+			kfree(ctx_buf->context);
+			kfree(ctx_buf);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6406 Ran out of NVMET iocb/WQEs\n");
+			return -ENOMEM;
+		}
+		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+		nvmewqe = ctx_buf->iocbq;
+		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+		/* Initialize WQE */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+		/* Word 7 */
+		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+		/* Word 10 */
+		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+		ctx_buf->iocbq->context1 = NULL;
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		if (!ctx_buf->sglq) {
+			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+			kfree(ctx_buf->context);
+			kfree(ctx_buf);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+					"6407 Ran out of NVMET XRIs\n");
+			return -ENOMEM;
+		}
+
+		/*
+		 * Add ctx to MRQidx context list. Our initial assumption
+		 * is MRQidx will be associated with CPUidx. This association
+		 * can change on the fly.
+		 */
+		infop = lpfc_get_ctx_list(phba, idx, idx);
+		spin_lock(&infop->nvmet_ctx_list_lock);
+		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
+		infop->nvmet_ctx_list_cnt++;
+		spin_unlock(&infop->nvmet_ctx_list_lock);
+
+		/* Spread ctx structures evenly across all MRQs */
+		idx++;
+		if (idx >= phba->cfg_nvmet_mrq)
+			idx = 0;
+	}
+
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
+			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+					"6408 TOTAL NVMET ctx for CPU %d "
+					"MRQ %d: cnt %d nextcpu %p\n",
+					i, j, infop->nvmet_ctx_list_cnt,
+					infop->nvmet_ctx_next_cpu);
+		}
+	}
+	return 0;
+}
+
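+/**
+ * lpfc_nvmet_create_targetport - Register the HBA as a NVMET target port
+ * @phba: Pointer to HBA context object.
+ *
+ * Sets up the NVMET IO context lists and registers the port with the
+ * nvmet-fc transport using the lpfc_tgttemplate operations. On success
+ * the targetport private area and statistic counters are initialized;
+ * on failure the IO context lists are torn down again.
+ *
+ * Returns 0 on success (or if already registered), otherwise an error.
+ **/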
+int
+lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
+{
+	struct lpfc_vport  *vport = phba->pport;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct nvmet_fc_port_info pinfo;
+	int error;
+
+	if (phba->targetport)
+		return 0;
+
+	error = lpfc_nvmet_setup_io_context(phba);
+	if (error)
+		return error;
+
+	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
+	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
+	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
+	pinfo.port_id = vport->fc_myDID;
+
+	/* Limit to LPFC_MAX_NVME_SEG_CNT.
+	 * For now need + 1 to get around NVME transport logic.
+	 */
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+				"6400 Reducing sg segment cnt to %d\n",
+				LPFC_MAX_NVME_SEG_CNT);
+		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+	} else {
+		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+	}
+	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
+	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
+	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
+					   NVMET_FCTGTFEAT_CMD_IN_ISR |
+					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;
+
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
+					     &phba->pcidev->dev,
+					     &phba->targetport);
+#else
+	error = -ENOENT;
+#endif
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+				"6025 Cannot register NVME targetport x%x: "
+				"portnm %llx nodenm %llx segs %d qs %d\n",
+				error,
+				pinfo.port_name, pinfo.node_name,
+				lpfc_tgttemplate.max_sgl_segments,
+				lpfc_tgttemplate.max_hw_queues);
+		phba->targetport = NULL;
+		phba->nvmet_support = 0;
+
+		lpfc_nvmet_cleanup_io_context(phba);
+
+	} else {
+		tgtp = (struct lpfc_nvmet_tgtport *)
+			phba->targetport->private;
+		tgtp->phba = phba;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+				"6026 Registered NVME "
+				"targetport: %p, private %p "
+				"portnm %llx nodenm %llx segs %d qs %d\n",
+				phba->targetport, tgtp,
+				pinfo.port_name, pinfo.node_name,
+				lpfc_tgttemplate.max_sgl_segments,
+				lpfc_tgttemplate.max_hw_queues);
+
+		atomic_set(&tgtp->rcv_ls_req_in, 0);
+		atomic_set(&tgtp->rcv_ls_req_out, 0);
+		atomic_set(&tgtp->rcv_ls_req_drop, 0);
+		atomic_set(&tgtp->xmt_ls_abort, 0);
+		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
+		atomic_set(&tgtp->xmt_ls_rsp, 0);
+		atomic_set(&tgtp->xmt_ls_drop, 0);
+		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
+		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
+		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
+		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
+		atomic_set(&tgtp->xmt_fcp_drop, 0);
+		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
+		atomic_set(&tgtp->xmt_fcp_read, 0);
+		atomic_set(&tgtp->xmt_fcp_write, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp, 0);
+		atomic_set(&tgtp->xmt_fcp_release, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+		atomic_set(&tgtp->xmt_fcp_abort, 0);
+		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
+		atomic_set(&tgtp->xmt_abort_unsol, 0);
+		atomic_set(&tgtp->xmt_abort_sol, 0);
+		atomic_set(&tgtp->xmt_abort_rsp, 0);
+		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
+	}
+	return error;
+}
+
+int
+lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
+{
+	struct lpfc_vport  *vport = phba->pport;
+
+	if (!phba->targetport)
+		return 0;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+			 "6007 Update NVMET port %p did x%x\n",
+			 phba->targetport, vport->fc_myDID);
+
+	phba->targetport->port_id = vport->fc_myDID;
+	return 0;
+}
+
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+			    struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+	struct lpfc_nodelist *ndlp;
+	unsigned long iflag = 0;
+	int rrq_empty = 0;
+	bool released = false;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	list_for_each_entry_safe(ctxp, next_ctxp,
+				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				 list) {
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+			continue;
+
+		/* Check if we already received a free context call
+		 * and we have completed processing an abort situation.
+		 */
+		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
+		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
+			list_del(&ctxp->list);
+			released = true;
+		}
+		ctxp->flag &= ~LPFC_NVMET_XBUSY;
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+
+		rrq_empty = list_empty(&phba->active_rrq_list);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
+		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+			lpfc_set_rrq_active(phba, ndlp,
+				ctxp->ctxbuf->sglq->sli4_lxritag,
+				rxid, 1);
+			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+		}
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6318 XB aborted oxid %x flg x%x (%x)\n",
+				ctxp->oxid, ctxp->flag, released);
+		if (released)
+			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+
+		if (rrq_empty)
+			lpfc_worker_wake_up(phba);
+		return;
+	}
+	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+int
+lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
+			   struct fc_frame_header *fc_hdr)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+	struct nvmefc_tgt_fcp_req *rsp;
+	uint16_t xri;
+	unsigned long iflag = 0;
+
+	xri = be16_to_cpu(fc_hdr->fh_ox_id);
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	list_for_each_entry_safe(ctxp, next_ctxp,
+				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+				 list) {
+		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
+			continue;
+
+		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		spin_lock_irqsave(&ctxp->ctxlock, iflag);
+		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+		lpfc_nvmeio_data(phba,
+			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+			xri, smp_processor_id(), 0);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
+
+		rsp = &ctxp->ctx.fcp_req;
+		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
+
+		/* Respond with BA_ACC accordingly */
+		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+		return 0;
+	}
+	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+			 xri, smp_processor_id(), 1);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+
+	/* Respond with BA_RJT accordingly */
+	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
+#endif
+	return 0;
+}
+
+void
+lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_tgtport *tgtp;
+
+	if (phba->nvmet_support == 0)
+		return;
+	if (phba->targetport) {
+		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+		init_completion(&tgtp->tport_unreg_done);
+		nvmet_fc_unregister_targetport(phba->targetport);
+		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		lpfc_nvmet_cleanup_io_context(phba);
+	}
+	phba->targetport = NULL;
+#endif
+}
+
+/**
+ * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ *
+ * This routine processes an unsolicited NVME LS request received on the
+ * SLI RQ. It allocates a receive context for the exchange, saves the
+ * exchange information from the FC header, and hands the LS payload to
+ * the NVMET transport via nvmet_fc_rcv_ls_req(). If the request cannot
+ * be accepted, the IO is dropped and an abort is issued for the exchange.
+ **/
+static void
+lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			   struct hbq_dmabuf *nvmebuf)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	uint32_t *payload;
+	uint32_t size, oxid, sid, rc;
+
+	if (!nvmebuf || !phba->targetport) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6154 LS Drop IO\n");
+		oxid = 0;
+		size = 0;
+		sid = 0;
+		ctxp = NULL;
+		goto dropit;
+	}
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	payload = (uint32_t *)(nvmebuf->dbuf.virt);
+	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
+	if (ctxp == NULL) {
+		atomic_inc(&tgtp->rcv_ls_req_drop);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6155 LS Drop IO x%x: Alloc\n",
+				oxid);
+dropit:
+		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
+				 "xri x%x sz %d from %06x\n",
+				 oxid, size, sid);
+		if (nvmebuf)
+			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+		return;
+	}
+	ctxp->phba = phba;
+	ctxp->size = size;
+	ctxp->oxid = oxid;
+	ctxp->sid = sid;
+	ctxp->wqeq = NULL;
+	ctxp->state = LPFC_NVMET_STE_LS_RCV;
+	ctxp->entry_cnt = 1;
+	ctxp->rqb_buffer = (void *)nvmebuf;
+
+	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
+			 oxid, size, sid);
+	/*
+	 * The calling sequence should be:
+	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
+	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
+	 */
+	atomic_inc(&tgtp->rcv_ls_req_in);
+	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
+				 payload, size);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
+			"%08x %08x %08x\n", size, rc,
+			*payload, *(payload+1), *(payload+2),
+			*(payload+3), *(payload+4), *(payload+5));
+
+	if (rc == 0) {
+		atomic_inc(&tgtp->rcv_ls_req_out);
+		return;
+	}
+
+	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
+			 oxid, size, sid);
+
+	atomic_inc(&tgtp->rcv_ls_req_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
+			ctxp->oxid, rc);
+
+	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
+	if (nvmebuf)
+		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+
+	atomic_inc(&tgtp->xmt_ls_abort);
+	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
+}
+
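+/**
+ * lpfc_nvmet_replenish_context - Refill an empty MRQ context list
+ * @phba: Pointer to HBA context object.
+ * @current_infop: Pointer to the CPU/MRQ context list that ran empty.
+ *
+ * Searches the other CPUs' context lists for this MRQ, starting with the
+ * CPU that supplied contexts the last time, and splices the first
+ * non-empty list found onto the current list.
+ *
+ * Returns a context buffer taken from the spliced list, or NULL if all
+ * contexts for this MRQ are in-flight.
+ **/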
+static struct lpfc_nvmet_ctxbuf *
+lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
+			     struct lpfc_nvmet_ctx_info *current_infop)
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
+	struct lpfc_nvmet_ctx_info *get_infop;
+	int i;
+
+	/*
+	 * The current_infop for the MRQ an NVME command IU was received
+	 * on is empty. Our goal is to replenish this MRQ's context
+	 * list from another CPU's list.
+	 *
+	 * First we need to pick a context list to start looking on.
+	 * nvmet_ctx_start_cpu had available context the last time
+	 * we needed to replenish this CPU, whereas nvmet_ctx_next_cpu
+	 * is just the next sequential CPU for this MRQ.
+	 */
+	if (current_infop->nvmet_ctx_start_cpu)
+		get_infop = current_infop->nvmet_ctx_start_cpu;
+	else
+		get_infop = current_infop->nvmet_ctx_next_cpu;
+
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		if (get_infop == current_infop) {
+			get_infop = get_infop->nvmet_ctx_next_cpu;
+			continue;
+		}
+		spin_lock(&get_infop->nvmet_ctx_list_lock);
+
+		/* Just take the entire context list, if there are any */
+		if (get_infop->nvmet_ctx_list_cnt) {
+			list_splice_init(&get_infop->nvmet_ctx_list,
+				    &current_infop->nvmet_ctx_list);
+			current_infop->nvmet_ctx_list_cnt =
+				get_infop->nvmet_ctx_list_cnt - 1;
+			get_infop->nvmet_ctx_list_cnt = 0;
+			spin_unlock(&get_infop->nvmet_ctx_list_lock);
+
+			current_infop->nvmet_ctx_start_cpu = get_infop;
+			list_remove_head(&current_infop->nvmet_ctx_list,
+					 ctx_buf, struct lpfc_nvmet_ctxbuf,
+					 list);
+			return ctx_buf;
+		}
+
+		/* Otherwise, move on to the next CPU for this MRQ */
+		spin_unlock(&get_infop->nvmet_ctx_list_lock);
+		get_infop = get_infop->nvmet_ctx_next_cpu;
+	}
+
+#endif
+	/* Nothing found, all contexts for the MRQ are in-flight */
+	return NULL;
+}
+
+/**
+ * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @idx: relative index of MRQ vector
+ * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ *
+ * This routine processes an unsolicited NVME FCP command IU received on
+ * an MRQ. It takes a receive context from the per-CPU context list for
+ * the MRQ (replenishing from another CPU's list, or queueing the IO,
+ * if the list is empty), fills in the exchange information from the FC
+ * header, and hands the command to the NVMET transport via
+ * nvmet_fc_rcv_fcp_req().
+ **/
+static void
+lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
+			    uint32_t idx,
+			    struct rqb_dmabuf *nvmebuf,
+			    uint64_t isr_timestamp)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_nvmet_ctxbuf *ctx_buf;
+	struct lpfc_nvmet_ctx_info *current_infop;
+	uint32_t *payload;
+	uint32_t size, oxid, sid, rc, qno;
+	unsigned long iflag;
+	int current_cpu;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t id;
+#endif
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
+	ctx_buf = NULL;
+	if (!nvmebuf || !phba->targetport) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6157 NVMET FCP Drop IO\n");
+		oxid = 0;
+		size = 0;
+		sid = 0;
+		ctxp = NULL;
+		goto dropit;
+	}
+
+	/*
+	 * Get a pointer to the context list for this MRQ based on
+	 * the CPU this MRQ IRQ is associated with. If the CPU association
+	 * changes from our initial assumption, the context list could
+	 * be empty, thus it would need to be replenished with the
+	 * context list from another CPU for this MRQ.
+	 */
+	current_cpu = smp_processor_id();
+	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
+	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
+	if (current_infop->nvmet_ctx_list_cnt) {
+		list_remove_head(&current_infop->nvmet_ctx_list,
+				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+		current_infop->nvmet_ctx_list_cnt--;
+	} else {
+		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
+	}
+	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
+
+	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+	size = nvmebuf->bytes_recv;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
+		id = smp_processor_id();
+		if (id < LPFC_CHECK_CPU_CNT)
+			phba->cpucheck_rcv_io[id]++;
+	}
+#endif
+
+	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
+			 oxid, size, smp_processor_id());
+
+	if (!ctx_buf) {
+		/* Queue this NVME IO to process later */
+		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
+		list_add_tail(&nvmebuf->hbuf.list,
+			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
+		phba->sli4_hba.nvmet_io_wait_cnt++;
+		phba->sli4_hba.nvmet_io_wait_total++;
+		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
+				       iflag);
+
+		/* Post a brand new DMA buffer to RQ */
+		qno = nvmebuf->idx;
+		lpfc_post_rq_buffer(
+			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+		return;
+	}
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	payload = (uint32_t *)(nvmebuf->dbuf.virt);
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
+	if (ctxp->state != LPFC_NVMET_STE_FREE) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6414 NVMET Context corrupt %d %d oxid x%x\n",
+				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+	}
+	ctxp->wqeq = NULL;
+	ctxp->txrdy = NULL;
+	ctxp->offset = 0;
+	ctxp->phba = phba;
+	ctxp->size = size;
+	ctxp->oxid = oxid;
+	ctxp->sid = sid;
+	ctxp->idx = idx;
+	ctxp->state = LPFC_NVMET_STE_RCV;
+	ctxp->entry_cnt = 1;
+	ctxp->flag = 0;
+	ctxp->ctxbuf = ctx_buf;
+	spin_lock_init(&ctxp->ctxlock);
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on) {
+		ctxp->ts_isr_cmd = isr_timestamp;
+		ctxp->ts_cmd_nvme = ktime_get_ns();
+		ctxp->ts_nvme_data = 0;
+		ctxp->ts_data_wqput = 0;
+		ctxp->ts_isr_data = 0;
+		ctxp->ts_data_nvme = 0;
+		ctxp->ts_nvme_status = 0;
+		ctxp->ts_status_wqput = 0;
+		ctxp->ts_isr_status = 0;
+		ctxp->ts_status_nvme = 0;
+	}
+#endif
+
+	atomic_inc(&tgtp->rcv_fcp_cmd_in);
+	/*
+	 * The calling sequence should be:
+	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
+	 * the NVME command / FC header is stored, so we are free to repost
+	 * the buffer.
+	 */
+	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+				  payload, size);
+
+	/* Process FCP command */
+	if (rc == 0) {
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+		return;
+	}
+
+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba,
+				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+				 oxid, size, sid);
+		/* defer reposting rcv buffer till .defer_rcv callback */
+		ctxp->rqb_buffer = nvmebuf;
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		return;
+	}
+
+	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
+			ctxp->oxid, rc,
+			atomic_read(&tgtp->rcv_fcp_cmd_in),
+			atomic_read(&tgtp->rcv_fcp_cmd_out),
+			atomic_read(&tgtp->xmt_fcp_release));
+dropit:
+	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
+			 oxid, size, sid);
+	if (oxid) {
+		lpfc_nvmet_defer_release(phba, ctxp);
+		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+		return;
+	}
+
+	if (ctx_buf)
+		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
+
+	if (nvmebuf)
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
+/**
+ * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @piocb: pointer to the driver iocbq holding the received nvme buffer.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
+ * SLI RQ on which the unsolicited event was received.
+ **/
+void
+lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			  struct lpfc_iocbq *piocb)
+{
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *nvmebuf;
+
+	d_buf = piocb->context2;
+	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+
+	if (phba->nvmet_support == 0) {
+		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+		return;
+	}
+	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
+}
+
+/**
+ * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
+ * @phba: pointer to lpfc hba data structure.
+ * @idx: relative index of MRQ vector
+ * @nvmebuf: pointer to received nvme data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
+ * SLI RQ on which the unsolicited event was received.
+ **/
+void
+lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
+			   uint32_t idx,
+			   struct rqb_dmabuf *nvmebuf,
+			   uint64_t isr_timestamp)
+{
+	if (phba->nvmet_support == 0) {
+		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
+		return;
+	}
+	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
+				    isr_timestamp);
+}
+
+/**
+ * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
+ * @phba: pointer to a host N_Port data structure.
+ * @ctxp: Context info for NVME LS Request
+ * @rspbuf: DMA buffer of NVME command.
+ * @rspsize: size of the NVME command.
+ *
+ * This routine allocates an lpfc-WQE data structure from the driver
+ * lpfc-WQE free-list and prepares an XMIT_SEQUENCE64 WQE that transmits
+ * the NVME LS response payload described by rspbuf and rspsize. It fills
+ * in the Buffer Descriptor Entry (BDE) for the response payload and the
+ * exchange information from the receive context. The reference count on
+ * the ndlp is incremented by 1 and the reference to the ndlp is put into
+ * context1 of the WQE data structure for this WQE to hold the ndlp
+ * reference for the command's callback function to access later.
+ *
+ * Return code
+ *   Pointer to the newly allocated/prepared nvme wqe data structure
+ *   NULL - when nvme wqe data structure allocation/preparation failed
+ **/
+static struct lpfc_iocbq *
+lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
+		       struct lpfc_nvmet_rcv_ctx *ctxp,
+		       dma_addr_t rspbuf, uint16_t rspsize)
+{
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_iocbq *nvmewqe;
+	union lpfc_wqe *wqe;
+
+	if (!lpfc_is_link_up(phba)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+				"6104 NVMET prep LS wqe: link err: "
+				"NPORT x%x oxid:x%x ste %d\n",
+				ctxp->sid, ctxp->oxid, ctxp->state);
+		return NULL;
+	}
+
+	/* Allocate buffer for  command wqe */
+	nvmewqe = lpfc_sli_get_iocbq(phba);
+	if (nvmewqe == NULL) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+				"6105 NVMET prep LS wqe: No WQE: "
+				"NPORT x%x oxid x%x ste %d\n",
+				ctxp->sid, ctxp->oxid, ctxp->state);
+		return NULL;
+	}
+
+	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+				"6106 NVMET prep LS wqe: No ndlp: "
+				"NPORT x%x oxid x%x ste %d\n",
+				ctxp->sid, ctxp->oxid, ctxp->state);
+		goto nvme_wqe_free_wqeq_exit;
+	}
+	ctxp->wqeq = nvmewqe;
+
+	/* prevent preparing wqe with NULL ndlp reference */
+	nvmewqe->context1 = lpfc_nlp_get(ndlp);
+	if (nvmewqe->context1 == NULL)
+		goto nvme_wqe_free_wqeq_exit;
+	nvmewqe->context2 = ctxp;
+
+	wqe = &nvmewqe->wqe;
+	memset(wqe, 0, sizeof(union lpfc_wqe));
+
+	/* Words 0 - 2 */
+	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
+	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
+	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
+
+	/* Word 3 */
+
+	/* Word 4 */
+
+	/* Word 5 */
+	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
+	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
+	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
+	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
+	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
+
+	/* Word 6 */
+	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
+
+	/* Word 7 */
+	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
+	       CMD_XMIT_SEQUENCE64_WQE);
+	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
+	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
+	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+
+	/* Word 8 */
+	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
+
+	/* Word 9 */
+	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
+	/* Needs to be set by caller */
+	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
+
+	/* Word 10 */
+	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
+	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+	       LPFC_WQE_LENLOC_WORD12);
+	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+
+	/* Word 11 */
+	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
+	       LPFC_WQE_CQ_ID_DEFAULT);
+	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
+	       OTHER_COMMAND);
+
+	/* Word 12 */
+	wqe->xmit_sequence.xmit_len = rspsize;
+
+	nvmewqe->retry = 1;
+	nvmewqe->vport = phba->pport;
+	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
+	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
+
+	/* Xmit NVMET response to remote NPORT <did> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+			"6039 Xmit NVMET LS response to remote "
+			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
+			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
+			rspsize);
+	return nvmewqe;
+
+nvme_wqe_free_wqeq_exit:
+	nvmewqe->context2 = NULL;
+	nvmewqe->context3 = NULL;
+	lpfc_sli_release_iocbq(phba, nvmewqe);
+	return NULL;
+}
+
+static struct lpfc_iocbq *
+lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
+			struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct sli4_sge *sgl;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_iocbq *nvmewqe;
+	struct scatterlist *sgel;
+	union lpfc_wqe128 *wqe;
+	uint32_t *txrdy;
+	dma_addr_t physaddr;
+	int i, cnt;
+	int xc = 1;
+
+	if (!lpfc_is_link_up(phba)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6107 NVMET prep FCP wqe: link err:"
+				"NPORT x%x oxid x%x ste %d\n",
+				ctxp->sid, ctxp->oxid, ctxp->state);
+		return NULL;
+	}
+
+	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6108 NVMET prep FCP wqe: no ndlp: "
+				"NPORT x%x oxid x%x ste %d\n",
+				ctxp->sid, ctxp->oxid, ctxp->state);
+		return NULL;
+	}
+
+	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6109 NVMET prep FCP wqe: seg cnt err: "
+				"NPORT x%x oxid x%x ste %d cnt %d\n",
+				ctxp->sid, ctxp->oxid, ctxp->state,
+				phba->cfg_nvme_seg_cnt);
+		return NULL;
+	}
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	nvmewqe = ctxp->wqeq;
+	if (nvmewqe == NULL) {
+		/* Use the preallocated command WQE from the context buffer */
+		nvmewqe = ctxp->ctxbuf->iocbq;
+		if (nvmewqe == NULL) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6110 NVMET prep FCP wqe: No "
+					"WQE: NPORT x%x oxid x%x ste %d\n",
+					ctxp->sid, ctxp->oxid, ctxp->state);
+			return NULL;
+		}
+		ctxp->wqeq = nvmewqe;
+		xc = 0; /* create new XRI */
+		nvmewqe->sli4_lxritag = NO_XRI;
+		nvmewqe->sli4_xritag = NO_XRI;
+	}
+
+	/* Sanity check */
+	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
+	    (ctxp->entry_cnt == 1)) ||
+	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
+		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+	} else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6111 Wrong state NVMET FCP: %d  cnt %d\n",
+				ctxp->state, ctxp->entry_cnt);
+		return NULL;
+	}
+
+	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
+	switch (rsp->op) {
+	case NVMET_FCOP_READDATA:
+	case NVMET_FCOP_READDATA_RSP:
+		/* Words 0 - 2 : The first sg segment */
+		sgel = &rsp->sg[0];
+		physaddr = sg_dma_address(sgel);
+		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
+		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
+		wqe->fcp_tsend.bde.addrHigh =
+			cpu_to_le32(putPaddrHigh(physaddr));
+
+		/* Word 3 */
+		wqe->fcp_tsend.payload_offset_len = 0;
+
+		/* Word 4 */
+		wqe->fcp_tsend.relative_offset = ctxp->offset;
+
+		/* Word 5 */
+
+		/* Word 6 */
+		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
+		       nvmewqe->sli4_xritag);
+
+		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
+		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
+
+		/* Word 8 */
+		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
+
+		/* Word 9 */
+		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
+		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
+
+		/* Word 10 */
+		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
+		       LPFC_WQE_LENLOC_WORD12);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
+		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
+		if (phba->cfg_nvme_oas)
+			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
+
+		/* Word 11 */
+		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
+		       LPFC_WQE_CQ_ID_DEFAULT);
+		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
+		       FCP_COMMAND_TSEND);
+
+		/* Word 12 */
+		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
+
+		/* Setup 2 SKIP SGEs */
+		sgl->addr_hi = 0;
+		sgl->addr_lo = 0;
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = 0;
+		sgl++;
+		sgl->addr_hi = 0;
+		sgl->addr_lo = 0;
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = 0;
+		sgl++;
+		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
+			atomic_inc(&tgtp->xmt_fcp_read_rsp);
+			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
+			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
+			    (rsp->rsplen == 12)) {
+				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
+				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
+				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
+				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
+			} else {
+				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
+				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
+				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
+				       ((rsp->rsplen >> 2) - 1));
+				memcpy(&wqe->words[16], rsp->rspaddr,
+				       rsp->rsplen);
+			}
+		} else {
+			atomic_inc(&tgtp->xmt_fcp_read);
+
+			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
+			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
+			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
+			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
+		}
+		break;
+
+	case NVMET_FCOP_WRITEDATA:
+		/* Words 0 - 2 : The first sg segment */
+		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
+				       GFP_KERNEL, &physaddr);
+		if (!txrdy) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6041 Bad txrdy buffer: oxid x%x\n",
+					ctxp->oxid);
+			return NULL;
+		}
+		ctxp->txrdy = txrdy;
+		ctxp->txrdy_phys = physaddr;
+		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
+		wqe->fcp_treceive.bde.addrLow =
+			cpu_to_le32(putPaddrLow(physaddr));
+		wqe->fcp_treceive.bde.addrHigh =
+			cpu_to_le32(putPaddrHigh(physaddr));
+
+		/* Word 3 */
+		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
+
+		/* Word 4 */
+		wqe->fcp_treceive.relative_offset = ctxp->offset;
+
+		/* Word 5 */
+
+		/* Word 6 */
+		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
+		       nvmewqe->sli4_xritag);
+
+		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
+		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
+		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
+		       CMD_FCP_TRECEIVE64_WQE);
+
+		/* Word 8 */
+		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
+
+		/* Word 9 */
+		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
+		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
+
+		/* Word 10 */
+		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
+		       LPFC_WQE_LENLOC_WORD12);
+		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
+		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
+		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
+		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
+		if (phba->cfg_nvme_oas)
+			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
+
+		/* Word 11 */
+		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
+		       LPFC_WQE_CQ_ID_DEFAULT);
+		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
+		       FCP_COMMAND_TRECEIVE);
+		bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
+
+		/* Word 12 */
+		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
+
+		/* Setup 1 TXRDY and 1 SKIP SGE */
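+		/*
+		 * The 12-byte TXRDY payload mirrors the FCP XFER_RDY IU:
+		 * word 0 = DATA_RO (relative offset, 0 here),
+		 * word 1 = BURST_LEN (transfer length, big-endian),
+		 * word 2 = reserved.
+		 */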
+		txrdy[0] = 0;
+		txrdy[1] = cpu_to_be32(rsp->transfer_length);
+		txrdy[2] = 0;
+
+		sgl->addr_hi = putPaddrHigh(physaddr);
+		sgl->addr_lo = putPaddrLow(physaddr);
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+		sgl++;
+		sgl->addr_hi = 0;
+		sgl->addr_lo = 0;
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = 0;
+		sgl++;
+		atomic_inc(&tgtp->xmt_fcp_write);
+		break;
+
+	case NVMET_FCOP_RSP:
+		/* Words 0 - 2 */
+		physaddr = rsp->rspdma;
+		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
+		wqe->fcp_trsp.bde.addrLow =
+			cpu_to_le32(putPaddrLow(physaddr));
+		wqe->fcp_trsp.bde.addrHigh =
+			cpu_to_le32(putPaddrHigh(physaddr));
+
+		/* Word 3 */
+		wqe->fcp_trsp.response_len = rsp->rsplen;
+
+		/* Word 4 */
+		wqe->fcp_trsp.rsvd_4_5[0] = 0;
+
+		/* Word 5 */
+
+		/* Word 6 */
+		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
+		       nvmewqe->sli4_xritag);
+
+		/* Word 7 */
+		bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
+		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
+		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
+
+		/* Word 8 */
+		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
+
+		/* Word 9 */
+		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
+		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
+
+		/* Word 10 */
+		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
+		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
+		if (phba->cfg_nvme_oas)
+			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
+
+		/* Word 11 */
+		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
+		       LPFC_WQE_CQ_ID_DEFAULT);
+		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
+		       FCP_COMMAND_TRSP);
+		bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
+
+		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
+			/* Good response - all zeros on the wire */
+			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
+			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
+			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
+		} else {
+			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
+			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
+			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
+			       ((rsp->rsplen >> 2) - 1));
+			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
+		}
+
+		/* Use rspbuf, NOT sg list */
+		rsp->sg_cnt = 0;
+		sgl->word2 = 0;
+		atomic_inc(&tgtp->xmt_fcp_rsp);
+		break;
+
+	default:
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+				"6064 Unknown Rsp Op %d\n",
+				rsp->op);
+		return NULL;
+	}
+
+	nvmewqe->retry = 1;
+	nvmewqe->vport = phba->pport;
+	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
+	nvmewqe->context1 = ndlp;
+
+	for (i = 0; i < rsp->sg_cnt; i++) {
+		sgel = &rsp->sg[i];
+		physaddr = sg_dma_address(sgel);
+		cnt = sg_dma_len(sgel);
+		sgl->addr_hi = putPaddrHigh(physaddr);
+		sgl->addr_lo = putPaddrLow(physaddr);
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
+		if ((i+1) == rsp->sg_cnt)
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(cnt);
+		sgl++;
+		ctxp->offset += cnt;
+	}
+	ctxp->state = LPFC_NVMET_STE_DATA;
+	ctxp->entry_cnt++;
+	return nvmewqe;
+}
+
+/**
+ * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. It is the completion handler for NVME ABTS for FCP cmds.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	struct lpfc_nvmet_tgtport *tgtp;
+	uint32_t status, result;
+	unsigned long flags;
+	bool released = false;
+
+	ctxp = cmdwqe->context2;
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	result = wcqe->parameter;
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
+
+	ctxp->state = LPFC_NVMET_STE_DONE;
+
+	/* Check if we already received a free context call
+	 * and we have completed processing an abort situation.
+	 */
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
+	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
+	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+		list_del(&ctxp->list);
+		released = true;
+	}
+	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+	atomic_inc(&tgtp->xmt_abort_rsp);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
+			"WCQE: %08x %08x %08x %08x\n",
+			ctxp->oxid, ctxp->flag, released,
+			wcqe->word0, wcqe->total_data_placed,
+			result, wcqe->word3);
+
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
+	/*
+	 * if transport has released ctx, then can reuse it. Otherwise,
+	 * will be recycled by transport release call.
+	 */
+	if (released)
+		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+
+	/* This is the iocbq for the abort, not the command */
+	lpfc_sli_release_iocbq(phba, cmdwqe);
+
+	/* Since iaab/iaar are NOT set, there is no work left.
+	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+	 * should have been called already.
+	 */
+}
+
+/**
+ * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. It is the completion handler for NVME ABTS for FCP cmds.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+			       struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	struct lpfc_nvmet_tgtport *tgtp;
+	unsigned long flags;
+	uint32_t status, result;
+	bool released = false;
+
+	ctxp = cmdwqe->context2;
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	result = wcqe->parameter;
+
+	if (!ctxp) {
+		/* if the context is NULL, the related I/O already completed */
+		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
+				wcqe->word0, wcqe->total_data_placed,
+				result, wcqe->word3);
+		return;
+	}
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
+
+	/* Sanity check */
+	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+				"6112 ABTS Wrong state:%d oxid x%x\n",
+				ctxp->state, ctxp->oxid);
+	}
+
+	/* Check if we already received a free context call
+	 * and we have completed processing an abort situation.
+	 */
+	ctxp->state = LPFC_NVMET_STE_DONE;
+	spin_lock_irqsave(&ctxp->ctxlock, flags);
+	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
+	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+		list_del(&ctxp->list);
+		released = true;
+	}
+	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+	atomic_inc(&tgtp->xmt_abort_rsp);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6316 ABTS cmpl xri x%x flg x%x (%x) "
+			"WCQE: %08x %08x %08x %08x\n",
+			ctxp->oxid, ctxp->flag, released,
+			wcqe->word0, wcqe->total_data_placed,
+			result, wcqe->word3);
+
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
+	/*
+	 * if transport has released ctx, then can reuse it. Otherwise,
+	 * will be recycled by transport release call.
+	 */
+	if (released)
+		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
+
+	/* Since iaab/iaar are NOT set, there is no work left.
+	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+	 * should have been called already.
+	 */
+}
+
+/**
+ * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. It is the completion handler for NVME ABTS for LS cmds.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+			    struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	struct lpfc_nvmet_tgtport *tgtp;
+	uint32_t status, result;
+
+	ctxp = cmdwqe->context2;
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	result = wcqe->parameter;
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	atomic_inc(&tgtp->xmt_ls_abort_cmpl);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
+			ctxp, wcqe->word0, wcqe->total_data_placed,
+			result, wcqe->word3);
+
+	if (!ctxp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+				"6415 NVMET LS Abort No ctx: WCQE: "
+				 "%08x %08x %08x %08x\n",
+				wcqe->word0, wcqe->total_data_placed,
+				result, wcqe->word3);
+
+		lpfc_sli_release_iocbq(phba, cmdwqe);
+		return;
+	}
+
+	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6416 NVMET LS abort cmpl state mismatch: "
+				"oxid x%x: %d %d\n",
+				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+	}
+
+	cmdwqe->context2 = NULL;
+	cmdwqe->context3 = NULL;
+	lpfc_sli_release_iocbq(phba, cmdwqe);
+	kfree(ctxp);
+}
+
+static int
+lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
+			     struct lpfc_nvmet_rcv_ctx *ctxp,
+			     uint32_t sid, uint16_t xri)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_iocbq *abts_wqeq;
+	union lpfc_wqe *wqe_abts;
+	struct lpfc_nodelist *ndlp;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6067 ABTS: sid %x xri x%x/x%x\n",
+			sid, xri, ctxp->wqeq->sli4_xritag);
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
+	ndlp = lpfc_findnode_did(phba->pport, sid);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+		atomic_inc(&tgtp->xmt_abort_rsp_error);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+				"6134 Drop ABTS - wrong NDLP state x%x.\n",
+				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
+
+		/* No failure to an ABTS request. */
+		return 0;
+	}
+
+	abts_wqeq = ctxp->wqeq;
+	wqe_abts = &abts_wqeq->wqe;
+
+	/*
+	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
+	 * that were initialized in lpfc_sli4_nvmet_alloc.
+	 */
+	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
+
+	/* Word 5 */
+	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
+	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
+	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
+	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
+	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
+
+	/* Word 6 */
+	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
+	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
+	       abts_wqeq->sli4_xritag);
+
+	/* Word 7 */
+	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
+	       CMD_XMIT_SEQUENCE64_WQE);
+	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
+	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
+	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
+
+	/* Word 8 */
+	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
+
+	/* Word 9 */
+	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
+	/* Echo back the exchange (xri) supplied by the caller */
+	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
+
+	/* Word 10 */
+	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
+	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
+	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
+	       LPFC_WQE_LENLOC_WORD12);
+	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
+	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
+
+	/* Word 11 */
+	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
+	       LPFC_WQE_CQ_ID_DEFAULT);
+	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
+	       OTHER_COMMAND);
+
+	abts_wqeq->vport = phba->pport;
+	abts_wqeq->context1 = ndlp;
+	abts_wqeq->context2 = ctxp;
+	abts_wqeq->context3 = NULL;
+	abts_wqeq->rsvd2 = 0;
+	/* hba_wqidx should already be setup from command we are aborting */
+	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+	abts_wqeq->iocb.ulpLe = 1;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6069 Issue ABTS to xri x%x reqtag x%x\n",
+			xri, abts_wqeq->iotag);
+	return 1;
+}
+
+static int
+lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
+			       struct lpfc_nvmet_rcv_ctx *ctxp,
+			       uint32_t sid, uint16_t xri)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_iocbq *abts_wqeq;
+	union lpfc_wqe *abts_wqe;
+	struct lpfc_nodelist *ndlp;
+	unsigned long flags;
+	int rc;
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	if (!ctxp->wqeq) {
+		ctxp->wqeq = ctxp->ctxbuf->iocbq;
+		ctxp->wqeq->hba_wqidx = 0;
+	}
+
+	ndlp = lpfc_findnode_did(phba->pport, sid);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+		atomic_inc(&tgtp->xmt_abort_rsp_error);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+				"6160 Drop ABORT - wrong NDLP state x%x.\n",
+				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
+
+		/* No failure to an ABTS request. */
+		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		return 0;
+	}
+
+	/* Issue ABTS for this WQE based on iotag */
+	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
+	if (!ctxp->abort_wqeq) {
+		atomic_inc(&tgtp->xmt_abort_rsp_error);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+				"6161 ABORT failed: No wqeqs: "
+				"xri: x%x\n", ctxp->oxid);
+		/* No failure to an ABTS request. */
+		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		return 0;
+	}
+	abts_wqeq = ctxp->abort_wqeq;
+	abts_wqe = &abts_wqeq->wqe;
+	ctxp->state = LPFC_NVMET_STE_ABORT;
+
+	/* Announce entry to new IO submit field. */
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+			"6162 ABORT Request to rport DID x%06x "
+			"for xri x%x x%x\n",
+			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
+
+	/* If the hba is getting reset, this flag is set.  It is
+	 * cleared when the reset is complete and rings reestablished.
+	 */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	/* driver queued commands are in process of being flushed */
+	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		atomic_inc(&tgtp->xmt_abort_rsp_error);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"6163 Driver in reset cleanup - flushing "
+				"NVME Req now. hba_flag x%x oxid x%x\n",
+				phba->hba_flag, ctxp->oxid);
+		lpfc_sli_release_iocbq(phba, abts_wqeq);
+		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		return 0;
+	}
+
+	/* Outstanding abort is in progress */
+	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		atomic_inc(&tgtp->xmt_abort_rsp_error);
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+				"6164 Outstanding NVME I/O Abort Request "
+				"still pending on oxid x%x\n",
+				ctxp->oxid);
+		lpfc_sli_release_iocbq(phba, abts_wqeq);
+		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+		return 0;
+	}
+
+	/* Ready - mark outstanding as aborted by driver. */
+	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+	/* WQEs are reused.  Clear stale data and set key fields to
+	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+	 */
+	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+
+	/* word 3 */
+	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
+
+	/* word 7 */
+	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
+	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+	/* word 8 - tell the FW to abort the IO associated with this
+	 * outstanding exchange ID.
+	 */
+	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
+
+	/* word 9 - this is the iotag for the abts_wqe completion. */
+	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
+	       abts_wqeq->iotag);
+
+	/* word 10 */
+	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+	/* word 11 */
+	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
+	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
+	abts_wqeq->iocb_cmpl = NULL;
+	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
+	abts_wqeq->context2 = ctxp;
+	abts_wqeq->vport = phba->pport;
+	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (rc == WQE_SUCCESS) {
+		atomic_inc(&tgtp->xmt_abort_sol);
+		return 0;
+	}
+
+	atomic_inc(&tgtp->xmt_abort_rsp_error);
+	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+	lpfc_sli_release_iocbq(phba, abts_wqeq);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+			"6166 Failed ABORT issue_wqe with status x%x "
+			"for oxid x%x.\n",
+			rc, ctxp->oxid);
+	return 1;
+}
+
+static int
+lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
+				 struct lpfc_nvmet_rcv_ctx *ctxp,
+				 uint32_t sid, uint16_t xri)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_iocbq *abts_wqeq;
+	unsigned long flags;
+	int rc;
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	if (!ctxp->wqeq) {
+		ctxp->wqeq = ctxp->ctxbuf->iocbq;
+		ctxp->wqeq->hba_wqidx = 0;
+	}
+
+	if (ctxp->state == LPFC_NVMET_STE_FREE) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
+				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+		rc = WQE_BUSY;
+		goto aerr;
+	}
+	ctxp->state = LPFC_NVMET_STE_ABORT;
+	ctxp->entry_cnt++;
+	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
+	if (rc == 0)
+		goto aerr;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	abts_wqeq = ctxp->wqeq;
+	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
+	abts_wqeq->iocb_cmpl = NULL;
+	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
+	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (rc == WQE_SUCCESS)
+		return 0;
+
+aerr:
+	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+	atomic_inc(&tgtp->xmt_abort_rsp_error);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
+			ctxp->oxid, rc);
+	return 1;
+}
+
+static int
+lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
+				struct lpfc_nvmet_rcv_ctx *ctxp,
+				uint32_t sid, uint16_t xri)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_iocbq *abts_wqeq;
+	union lpfc_wqe *wqe_abts;
+	unsigned long flags;
+	int rc;
+
+	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
+	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
+		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+		ctxp->entry_cnt++;
+	} else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+				"6418 NVMET LS abort state mismatch "
+				"IO x%x: %d %d\n",
+				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+	}
+
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+	if (!ctxp->wqeq) {
+		/* Issue ABTS for this WQE based on iotag */
+		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
+		if (!ctxp->wqeq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+					"6068 Abort failed: No wqeqs: "
+					"xri: x%x\n", xri);
+			/* No failure to an ABTS request. */
+			kfree(ctxp);
+			return 0;
+		}
+	}
+	abts_wqeq = ctxp->wqeq;
+	wqe_abts = &abts_wqeq->wqe;
+
+	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
+		rc = WQE_BUSY;
+		goto out;
+	}
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
+	abts_wqeq->iocb_cmpl = NULL;
+	abts_wqeq->iocb_flag |=  LPFC_IO_NVME_LS;
+	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (rc == WQE_SUCCESS) {
+		atomic_inc(&tgtp->xmt_abort_unsol);
+		return 0;
+	}
+out:
+	atomic_inc(&tgtp->xmt_abort_rsp_error);
+	abts_wqeq->context2 = NULL;
+	abts_wqeq->context3 = NULL;
+	lpfc_sli_release_iocbq(phba, abts_wqeq);
+	kfree(ctxp);
+	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+			"6056 Failed to Issue ABTS. Status x%x\n", rc);
+	return 0;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvmet.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvmet.h
new file mode 100644
index 0000000..25a65b0
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -0,0 +1,140 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ ********************************************************************/
+
+#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
+#define LPFC_NVMET_RQE_DEF_COUNT	512
+#define LPFC_NVMET_SUCCESS_LEN	12
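+/*
+ * A 12-byte FCP_RSP (LPFC_NVMET_SUCCESS_LEN) carries no sense or response
+ * data, so the TRSP path can signal it as a good-status response that goes
+ * out as all zeros on the wire.
+ */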
+
+/* Used for NVME Target */
+struct lpfc_nvmet_tgtport {
+	struct lpfc_hba *phba;
+	struct completion tport_unreg_done;
+
+	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
+	atomic_t rcv_ls_req_in;
+	atomic_t rcv_ls_req_out;
+	atomic_t rcv_ls_req_drop;
+	atomic_t xmt_ls_abort;
+	atomic_t xmt_ls_abort_cmpl;
+
+	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
+	atomic_t xmt_ls_rsp;
+	atomic_t xmt_ls_drop;
+
+	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
+	atomic_t xmt_ls_rsp_error;
+	atomic_t xmt_ls_rsp_cmpl;
+
+	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
+	atomic_t rcv_fcp_cmd_in;
+	atomic_t rcv_fcp_cmd_out;
+	atomic_t rcv_fcp_cmd_drop;
+	atomic_t rcv_fcp_cmd_defer;
+	atomic_t xmt_fcp_release;
+
+	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
+	atomic_t xmt_fcp_drop;
+	atomic_t xmt_fcp_read_rsp;
+	atomic_t xmt_fcp_read;
+	atomic_t xmt_fcp_write;
+	atomic_t xmt_fcp_rsp;
+
+	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+	atomic_t xmt_fcp_rsp_cmpl;
+	atomic_t xmt_fcp_rsp_error;
+	atomic_t xmt_fcp_rsp_drop;
+
+	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+	atomic_t xmt_fcp_abort;
+	atomic_t xmt_fcp_abort_cmpl;
+	atomic_t xmt_abort_sol;
+	atomic_t xmt_abort_unsol;
+	atomic_t xmt_abort_rsp;
+	atomic_t xmt_abort_rsp_error;
+};
+
+struct lpfc_nvmet_ctx_info {
+	struct list_head nvmet_ctx_list;
+	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
+	uint16_t	nvmet_ctx_list_cnt;
+	char pad[16];  /* pad to a cache-line */
+};
+
+/* This retrieves the context info associated with the specified cpu / mrq */
+#define lpfc_get_ctx_list(phba, cpu, mrq)  \
+	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
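+/*
+ * Illustrative use only (not taken from this driver): fetch the context
+ * list for the current CPU and MRQ index 0, then take its per-CPU lock:
+ *
+ *	struct lpfc_nvmet_ctx_info *infop;
+ *
+ *	infop = lpfc_get_ctx_list(phba, smp_processor_id(), 0);
+ *	spin_lock(&infop->nvmet_ctx_list_lock);
+ *	...
+ *	spin_unlock(&infop->nvmet_ctx_list_lock);
+ */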
+
+struct lpfc_nvmet_rcv_ctx {
+	union {
+		struct nvmefc_tgt_ls_req ls_req;
+		struct nvmefc_tgt_fcp_req fcp_req;
+	} ctx;
+	struct list_head list;
+	struct lpfc_hba *phba;
+	struct lpfc_iocbq *wqeq;
+	struct lpfc_iocbq *abort_wqeq;
+	dma_addr_t txrdy_phys;
+	spinlock_t ctxlock; /* protect flag access */
+	uint32_t *txrdy;
+	uint32_t sid;
+	uint32_t offset;
+	uint16_t oxid;
+	uint16_t size;
+	uint16_t entry_cnt;
+	uint16_t cpu;
+	uint16_t idx;
+	uint16_t state;
+	/* States */
+#define LPFC_NVMET_STE_LS_RCV		1
+#define LPFC_NVMET_STE_LS_ABORT		2
+#define LPFC_NVMET_STE_LS_RSP		3
+#define LPFC_NVMET_STE_RCV		4
+#define LPFC_NVMET_STE_DATA		5
+#define LPFC_NVMET_STE_ABORT		6
+#define LPFC_NVMET_STE_DONE		7
+#define LPFC_NVMET_STE_FREE		0xff
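+/*
+ * Typical progressions: LS_RCV -> LS_RSP or LS_ABORT for LS exchanges;
+ * RCV -> DATA -> DONE for FCP exchanges, with ABORT possible once a WQE
+ * is outstanding. FREE marks a context returned to the pool.
+ */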
+	uint16_t flag;
+#define LPFC_NVMET_IO_INP		0x1  /* IO is in progress on exchange */
+#define LPFC_NVMET_ABORT_OP		0x2  /* Abort WQE issued on exchange */
+#define LPFC_NVMET_XBUSY		0x4  /* XB bit set on IO cmpl */
+#define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
+#define LPFC_NVMET_ABTS_RCV		0x10  /* ABTS received on exchange */
+	struct rqb_dmabuf *rqb_buffer;
+	struct lpfc_nvmet_ctxbuf *ctxbuf;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint64_t ts_isr_cmd;
+	uint64_t ts_cmd_nvme;
+	uint64_t ts_nvme_data;
+	uint64_t ts_data_wqput;
+	uint64_t ts_isr_data;
+	uint64_t ts_data_nvme;
+	uint64_t ts_nvme_status;
+	uint64_t ts_status_wqput;
+	uint64_t ts_isr_status;
+	uint64_t ts_status_nvme;
+#endif
+};
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_scsi.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_scsi.c
new file mode 100644
index 0000000..2eba0c3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_scsi.c
@@ -0,0 +1,6067 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+#include <linux/t10-pi.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+
+#define LPFC_RESET_WAIT  2
+#define LPFC_ABORT_WAIT  2
+
+int _dump_buf_done = 1;
+
+static char *dif_op_str[] = {
+	"PROT_NORMAL",
+	"PROT_READ_INSERT",
+	"PROT_WRITE_STRIP",
+	"PROT_READ_STRIP",
+	"PROT_WRITE_INSERT",
+	"PROT_READ_PASS",
+	"PROT_WRITE_PASS",
+};
+
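+/* The 8-byte T10 Protection Information tuple appended to each logical block */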
+struct scsi_dif_tuple {
+	__be16 guard_tag;       /* Checksum */
+	__be16 app_tag;         /* Opaque storage */
+	__be32 ref_tag;         /* Target LBA or indirect LBA */
+};
+
+static struct lpfc_rport_data *
+lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
+
+	if (vport->phba->cfg_fof)
+		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
+	else
+		return (struct lpfc_rport_data *)sdev->hostdata;
+}
+
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
+
+static void
+lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_sglist(cmnd);
+
+	if (!_dump_buf_data) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9051 BLKGRD: ERROR: data scatterlist is null\n");
+		return;
+	}
+
+	dst = (void *) _dump_buf_data;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
+static void
+lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+	if (!_dump_buf_dif) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
+		return;
+	}
+
+	dst = _dump_buf_dif;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+	return sc->device->sector_size;
+}
+
+#define LPFC_CHECK_PROTECT_GUARD	1
+#define LPFC_CHECK_PROTECT_REF		2
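+/*
+ * Note: checking is currently enabled unconditionally below; the flag
+ * argument (LPFC_CHECK_PROTECT_GUARD/_REF) is accepted but not consulted.
+ */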
+static inline unsigned
+lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
+{
+	return 1;
+}
+
+static inline unsigned
+lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
+{
+	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
+		return 0;
+	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
+		return 1;
+	return 0;
+}
+
+/**
+ * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called from the lpfc_prep_task_mgmt_cmd function to
+ * set the last bit in the response sge entry.
+ **/
+static void
+lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
+				struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	if (sgl) {
+		sgl += 1;
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+	}
+}
+
+/**
+ * lpfc_update_stats - Update statistical data for the command completion
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called when there is a command completion and this
+ * function updates the statistical data for the command completion.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
+{
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	unsigned long flags;
+	struct Scsi_Host  *shost = cmd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	unsigned long latency;
+	int i;
+
+	if (cmd->result)
+		return;
+
+	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (!vport->stat_data_enabled ||
+		vport->stat_data_blocked ||
+		!pnode ||
+		!pnode->lat_data ||
+		(phba->bucket_type == LPFC_NO_BUCKET)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return;
+	}
+
+	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+			phba->bucket_step;
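+		/*
+		 * e.g. with bucket_base = 0 and bucket_step = 50, a 120 ms
+		 * latency maps to bucket (120 + 49) / 50 = 3 (rounded up).
+		 */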
+		/* check array subscript bounds */
+		if (i < 0)
+			i = 0;
+		else if (i >= LPFC_MAX_BUCKET_COUNT)
+			i = LPFC_MAX_BUCKET_COUNT - 1;
+	} else {
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+			if (latency <= (phba->bucket_base +
+				((1<<i)*phba->bucket_step)))
+				break;
+	}
+
+	pnode->lat_data[i].cmd_count++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is a resource error in the driver or
+ * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
+ * event each second, and wakes up the worker thread of @phba to process
+ * the event.
+ *
+ * This routine should be called with no lock held.
+ **/
+void
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+{
+	unsigned long flags;
+	uint32_t evt_posted;
+	unsigned long expires;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	atomic_inc(&phba->num_rsrc_err);
+	phba->last_rsrc_error_time = jiffies;
+
+	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
+	if (time_after(expires, jiffies)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	phba->last_ramp_down_time = jiffies;
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
+		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
+ * worker thread. It reduces the queue depth of every SCSI device on each
+ * vport associated with @phba.
+ **/
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	struct scsi_device *sdev;
+	unsigned long new_queue_depth;
+	unsigned long num_rsrc_err, num_cmd_success;
+	int i;
+
+	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+	num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+	/*
+	 * The error and success command counters are global per
+	 * driver instance.  If another handler has already
+	 * operated on this error event, just exit.
+	 */
+	if (num_rsrc_err == 0)
+		return;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
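+				/*
+				 * Scale the depth down in proportion to the
+				 * error ratio; e.g. depth 32 with 1 resource
+				 * error and 3 successes: 32 * 1 / 4 = 8, so
+				 * the new depth becomes 32 - 8 = 24.
+				 */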
+				new_queue_depth =
+					sdev->queue_depth * num_rsrc_err /
+					(num_rsrc_err + num_cmd_success);
+				if (!new_queue_depth)
+					new_queue_depth = sdev->queue_depth - 1;
+				else
+					new_queue_depth = sdev->queue_depth -
+								new_queue_depth;
+				scsi_change_queue_depth(sdev, new_queue_depth);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
+
+/**
+ * lpfc_scsi_dev_block - set all scsi hosts to block state
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks the vport list and sets each SCSI host to the blocked
+ * state by invoking the fc_remote_port_delete() routine. It is invoked by
+ * EEH when the device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	struct scsi_device *sdev;
+	struct fc_rport *rport;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
+				rport = starget_to_rport(scsi_target(sdev));
+				fc_remote_port_delete(rport);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for device with SLI-3 interface spec,
+ * the scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. The non-DMAable buffer region contains information to build
+ * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
+ * and the initial BPL. In addition to allocating memory, the FCP CMND and
+ * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb;
+	struct ulp_bde64 *bpl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl;
+	uint16_t iotag;
+	int bcnt, bpl_size;
+
+	bpl_size = phba->cfg_sg_dma_buf_size -
+		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+			 num_to_alloc, phba->cfg_sg_dma_buf_size,
+			 (int)sizeof(struct fcp_cmnd),
+			 (int)sizeof(struct fcp_rsp), bpl_size);
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
+
+		/*
+		 * Get memory from the pci pool to map the virt space to pci
+		 * bus space for an I/O.  The DMA buffer includes space for the
+		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+		 * necessary to support the sg_tablesize.
+		 */
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+					GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+		psb->fcp_cmnd = psb->data;
+		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp);
+
+		/* Initialize local short-hand pointers. */
+		bpl = psb->fcp_bpl;
+		pdma_phys_fcp_cmd = psb->dma_handle;
+		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp);
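+		/*
+		 * Layout of the single DMA allocation:
+		 * [struct fcp_cmnd][struct fcp_rsp][BPL entries ...]
+		 */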
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+		 * are sg list bdes.  Initialize the first two and leave the
+		 * rest for queuecommand.
+		 */
+		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
+
+		/* Setup the physical region for the FCP RSP */
+		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		if ((phba->sli_rev == 3) &&
+				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+			/* fill in immediate fcp command BDE */
+			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+					unsli3.fcp_ext.icd);
+			iocb->un.fcpi64.bdl.addrHigh = 0;
+			iocb->ulpBdeCount = 0;
+			iocb->ulpLe = 0;
+			/* fill in response BDE */
+			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
+							BUFF_TYPE_BDE_64;
+			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+				sizeof(struct fcp_rsp);
+			iocb->unsli3.fcp_ext.rbde.addrLow =
+				putPaddrLow(pdma_phys_fcp_rsp);
+			iocb->unsli3.fcp_ext.rbde.addrHigh =
+				putPaddrHigh(pdma_phys_fcp_rsp);
+		} else {
+			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+			iocb->un.fcpi64.bdl.bdeSize =
+					(2 * sizeof(struct ulp_bde64));
+			iocb->un.fcpi64.bdl.addrLow =
+					putPaddrLow(pdma_phys_bpl);
+			iocb->un.fcpi64.bdl.addrHigh =
+					putPaddrHigh(pdma_phys_bpl);
+			iocb->ulpBdeCount = 1;
+			iocb->ulpLe = 1;
+		}
+		iocb->ulpClass = CLASS3;
+		psb->status = IOSTAT_SUCCESS;
+		/* Put it back into the SCSI buffer list */
+		psb->cur_iocbq.context1  = psb;
+		lpfc_release_scsi_buf_s3(phba, psb);
+
+	}
+
+	return bcnt;
+}
+
+/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_for_each_entry_safe(psb, next_psb,
+				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->rdata && psb->rdata->pnode
+			&& psb->rdata->pnode->vport == vport)
+			psb->rdata = NULL;
+	}
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+	struct lpfc_iocbq *iocbq;
+	int i;
+	struct lpfc_nodelist *ndlp;
+	int rrq_empty = 0;
+	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
+
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_for_each_entry_safe(psb, next_psb,
+		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->cur_iocbq.sli4_xritag == xri) {
+			list_del(&psb->list);
+			psb->exch_busy = 0;
+			psb->status = IOSTAT_SUCCESS;
+			spin_unlock(
+				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			if (psb->rdata && psb->rdata->pnode)
+				ndlp = psb->rdata->pnode;
+			else
+				ndlp = NULL;
+
+			rrq_empty = list_empty(&phba->active_rrq_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			if (ndlp) {
+				lpfc_set_rrq_active(phba, ndlp,
+					psb->cur_iocbq.sli4_lxritag, rxid, 1);
+				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+			}
+			lpfc_release_scsi_buf_s4(phba, psb);
+			if (rrq_empty)
+				lpfc_worker_wake_up(phba);
+			return;
+		}
+	}
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
+			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
+			continue;
+		if (iocbq->sli4_xritag != xri)
+			continue;
+		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		psb->exch_busy = 0;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		if (!list_empty(&pring->txq))
+			lpfc_worker_wake_up(phba);
+		return;
+
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_sblist: pointer to the scsi buffer list.
+ * @sb_count: number of scsi buffers on @post_sblist.
+ *
+ * This routine walks the list of scsi buffers that was passed in. It attempts
+ * to construct blocks of scsi buffer sgls which contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post them to the
+ * port. Any single SCSI buffer sgl with a non-contiguous xri is posted with
+ * the embedded SGL post mailbox command instead. The @post_sblist passed in
+ * must be a local list, so no lock is needed while manipulating it.
+ *
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
+ **/
+static int
+lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
+			     struct list_head *post_sblist, int sb_count)
+{
+	struct lpfc_scsi_buf *psb, *psb_next;
+	int status, sgl_size;
+	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+	dma_addr_t pdma_phys_bpl1;
+	int last_xritag = NO_XRI;
+	LIST_HEAD(prep_sblist);
+	LIST_HEAD(blck_sblist);
+	LIST_HEAD(scsi_sblist);
+
+	/* sanity check */
+	if (sb_count <= 0)
+		return -EINVAL;
+
+	sgl_size = phba->cfg_sg_dma_buf_size -
+		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
+		list_del_init(&psb->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_sblist, &blck_sblist);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&psb->list, &prep_sblist);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&psb->list, &prep_sblist);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_sblist, &blck_sblist);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posting++;
+		last_xritag = psb->cur_iocbq.sli4_xritag;
+
+		/* end of repost sgl list condition for SCSI buffers */
+		if (num_posting == sb_count) {
+			if (post_cnt == 0) {
+				/* last sgl posting block */
+				list_splice_init(&prep_sblist, &blck_sblist);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				/* last single sgl with non-contiguous xri */
+				if (sgl_size > SGL_PAGE_SIZE)
+					pdma_phys_bpl1 = psb->dma_phys_bpl +
+								SGL_PAGE_SIZE;
+				else
+					pdma_phys_bpl1 = 0;
+				status = lpfc_sli4_post_sgl(phba,
+						psb->dma_phys_bpl,
+						pdma_phys_bpl1,
+						psb->cur_iocbq.sli4_xritag);
+				if (status) {
+					/* failure, put on abort scsi list */
+					psb->exch_busy = 1;
+				} else {
+					/* success, put on SCSI buffer list */
+					psb->exch_busy = 0;
+					psb->status = IOSTAT_SUCCESS;
+					num_posted++;
+				}
+				/* success, put on SCSI buffer sgl list */
+				list_add_tail(&psb->list, &scsi_sblist);
+			}
+		}
+
+		/* continue until a non-embedded page's worth of sgls is gathered */
+		if (post_cnt == 0)
+			continue;
+
+		/* post block of SCSI buffer list sgls */
+		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
+						       post_cnt);
+
+		/* don't reset xritag due to hole in xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset SCSI buffer post count for next round of posting */
+		post_cnt = 0;
+
+		/* move the block's SCSI buffers onto the SCSI buffer sgl list */
+		while (!list_empty(&blck_sblist)) {
+			list_remove_head(&blck_sblist, psb,
+					 struct lpfc_scsi_buf, list);
+			if (status) {
+				/* failure, put on abort scsi list */
+				psb->exch_busy = 1;
+			} else {
+				/* success, put on SCSI buffer list */
+				psb->exch_busy = 0;
+				psb->status = IOSTAT_SUCCESS;
+				num_posted++;
+			}
+			list_add_tail(&psb->list, &scsi_sblist);
+		}
+	}
+	/* Push SCSI buffers with posted sgls to the available list */
+	while (!list_empty(&scsi_sblist)) {
+		list_remove_head(&scsi_sblist, psb,
+				 struct lpfc_scsi_buf, list);
+		lpfc_release_scsi_buf_s4(phba, psb);
+	}
+	return num_posted;
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
+ * to the lpfc_scsi_buf_list. If the repost fails, all scsi buffers are
+ * rejected.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+	LIST_HEAD(post_sblist);
+	int num_posted, rc = 0;
+
+	/* move all SCSI buffers that need reposting onto a local list */
+	spin_lock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+
+	/* post the list of scsi buffer sgls to port if available */
+	if (!list_empty(&post_sblist)) {
+		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
+						phba->sli4_hba.scsi_xri_cnt);
+		/* failed to post any scsi buffer, return error */
+		if (num_posted == 0)
+			rc = -EIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates scsi buffers for a device with the SLI-4 interface
+ * spec. The scsi buffer contains all the necessary information needed to
+ * initiate a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and
+ * putting them on a list, it posts them to the port by using SGL block post.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated and posted.
+ *   0 = failure; less than @num_to_alloc indicates a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb;
+	struct sli4_sge *sgl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl;
+	uint16_t iotag, lxri = 0;
+	int bcnt, num_posted, sgl_size;
+	LIST_HEAD(prep_sblist);
+	LIST_HEAD(post_sblist);
+	LIST_HEAD(scsi_sblist);
+
+	sgl_size = phba->cfg_sg_dma_buf_size -
+		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+			 (int)sizeof(struct fcp_cmnd),
+			 (int)sizeof(struct fcp_rsp));
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
+		/*
+		 * Get memory from the pci pool to map the virt space to
+		 * pci bus space for an I/O. The DMA buffer includes space
+		 * for the struct fcp_cmnd, struct fcp_rsp and the number
+		 * of bde's necessary to support the sg_tablesize.
+		 */
+		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+						GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/*
+		 * 4K Page alignment is CRITICAL to BlockGuard, double check
+		 * to be sure.
+		 */
+		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
+		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+
+
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      psb->data, psb->dma_handle);
+			kfree(psb);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"3368 Failed to allocate IOTAG for"
+					" XRI:0x%x\n", lxri);
+			lpfc_sli4_free_xri(phba, lxri);
+			break;
+		}
+		psb->cur_iocbq.sli4_lxritag = lxri;
+		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
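+		/*
+		 * Note: sli4_lxritag is the driver's logical XRI index; the
+		 * xri_ids[] table maps it to the physical XRI value that the
+		 * port expects.
+		 */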
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+		psb->fcp_bpl = psb->data;
+		psb->fcp_cmnd = (psb->data + sgl_size);
+		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+					sizeof(struct fcp_cmnd));
+
+		/* Initialize local short-hand pointers. */
+		sgl = (struct sli4_sge *)psb->fcp_bpl;
+		pdma_phys_bpl = psb->dma_handle;
+		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
+		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
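+		/*
+		 * Resulting DMA buffer layout, relative to psb->dma_handle:
+		 * [0, sgl_size) holds the sgl/bpl, immediately followed by
+		 * the fcp_cmnd and then the fcp_rsp, mirroring the virtual
+		 * pointers set up above.
+		 */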
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP.
+		 * The balance are sg list bdes. Initialize the
+		 * first two and leave the rest for queuecommand.
+		 */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+		sgl++;
+
+		/* Setup the physical region for the FCP RSP */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+		/* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
+		 * We are setting the bpl to point to our sgl. An sgl's
+		 * entries are 16 bytes each, a bpl's entries are 12 bytes.
+		 */
+		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+		iocb->ulpBdeCount = 1;
+		iocb->ulpLe = 1;
+		iocb->ulpClass = CLASS3;
+		psb->cur_iocbq.context1 = psb;
+		psb->dma_phys_bpl = pdma_phys_bpl;
+
+		/* add the scsi buffer to a post list */
+		list_add_tail(&psb->list, &post_sblist);
+		spin_lock_irq(&phba->scsi_buf_list_get_lock);
+		phba->sli4_hba.scsi_xri_cnt++;
+		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
+			"3021 Allocate %d out of %d requested new SCSI "
+			"buffers\n", bcnt, num_to_alloc);
+
+	/* post the list of scsi buffer sgls to port if available */
+	if (!list_empty(&post_sblist))
+		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
+							  &post_sblist, bcnt);
+	else
+		num_posted = 0;
+
+	return num_posted;
+}
+
+/**
+ * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine wraps the actual SCSI buffer allocator function pointer from
+ * the lpfc_hba struct.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure; less than @num_to_alloc indicates a partial failure.
+ **/
+static inline int
+lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
+}
+
+/**
+ * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to the lpfc nodelist data structure (unused for SLI-3).
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
+	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+			 list);
+	if (!lpfc_cmd) {
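+		/*
+		 * The get list is empty; refill it by splicing over the put
+		 * list (the two-list scheme keeps the allocation and release
+		 * paths off a single shared lock) and retry the removal once.
+		 */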
+		spin_lock(&phba->scsi_buf_list_put_lock);
+		list_splice(&phba->lpfc_scsi_buf_list_put,
+			    &phba->lpfc_scsi_buf_list_get);
+		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+		list_remove_head(scsi_buf_list_get, lpfc_cmd,
+				 struct lpfc_scsi_buf, list);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
+	}
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+	return lpfc_cmd;
+}
+
+/**
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to the lpfc nodelist the I/O is destined for; buffers
+ *        whose XRI has an active RRQ against this node are skipped.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
+	unsigned long iflag = 0;
+	int found = 0;
+
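+	/*
+	 * Skip buffers whose XRI still has an active RRQ against this node;
+	 * an aborted exchange's XRI must not be reused for that rport until
+	 * RA_TOV expires (see lpfc_release_scsi_buf_s4 below).
+	 */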
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
+	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+				 &phba->lpfc_scsi_buf_list_get, list) {
+		if (lpfc_test_rrq_active(phba, ndlp,
+					 lpfc_cmd->cur_iocbq.sli4_lxritag))
+			continue;
+		list_del(&lpfc_cmd->list);
+		found = 1;
+		break;
+	}
+	if (!found) {
+		spin_lock(&phba->scsi_buf_list_put_lock);
+		list_splice(&phba->lpfc_scsi_buf_list_put,
+			    &phba->lpfc_scsi_buf_list_get);
+		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
+		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
+					 &phba->lpfc_scsi_buf_list_get, list) {
+			if (lpfc_test_rrq_active(
+				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+				continue;
+			list_del(&lpfc_cmd->list);
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
+	if (!found)
+		return NULL;
+	return lpfc_cmd;
+}
+
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to the lpfc nodelist data structure.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	return phba->lpfc_get_scsi_buf(phba, ndlp);
+}
+
+/**
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list.
+ **/
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	unsigned long iflag = 0;
+
+	psb->seg_cnt = 0;
+	psb->nonsg_phys = 0;
+	psb->prot_seg_cnt = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+	psb->pCmd = NULL;
+	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+}
+
+/**
+ * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list. For SLI4, XRIs are tied to the scsi buffer and,
+ * if the exchange was aborted, cannot be reused until at least RA_TOV has
+ * elapsed.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	unsigned long iflag = 0;
+
+	psb->seg_cnt = 0;
+	psb->nonsg_phys = 0;
+	psb->prot_seg_cnt = 0;
+
+	if (psb->exch_busy) {
+		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list,
+			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+	} else {
+		psb->pCmd = NULL;
+		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+	}
+}
+
+/**
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list.
+ **/
+static void
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	phba->lpfc_release_scsi_buf(phba, psb);
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for the scatter-gather list of the
+ * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec. It
+ * scans through the sg elements and formats the bdes. This routine also
+ * initializes all IOCB fields which are dependent on the scsi command request
+ * buffer.
+ *
+ * Return codes:
+ *   1 - Error
+ *   0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct scatterlist *sgel = NULL;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	int nseg, datadir = scsi_cmnd->sc_data_direction;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	bpl += 2;
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+
+		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+				  scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!nseg))
+			return 1;
+
+		lpfc_cmd->seg_cnt = nseg;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9064 BLKGRD: %s: Too many sg segments from "
+			       "dma_map_sg.  Config %d, seg_cnt %d\n",
+			       __func__, phba->cfg_sg_seg_cnt,
+			       lpfc_cmd->seg_cnt);
+			lpfc_cmd->seg_cnt = 0;
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single scsi command.  Just run through the seg_cnt and format
+		 * the bde's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
+		 */
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+			physaddr = sg_dma_address(sgel);
+			if (phba->sli_rev == 3 &&
+			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+				data_bde->addrLow = putPaddrLow(physaddr);
+				data_bde->addrHigh = putPaddrHigh(physaddr);
+				data_bde++;
+			} else {
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+				bpl->tus.f.bdeSize = sg_dma_len(sgel);
+				bpl->tus.w = le32_to_cpu(bpl->tus.w);
+				bpl->addrLow =
+					le32_to_cpu(putPaddrLow(physaddr));
+				bpl->addrHigh =
+					le32_to_cpu(putPaddrHigh(physaddr));
+				bpl++;
+			}
+		}
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized and for SLI-3 the extended bde count is
+	 * explicitly reinitialized since all iocb memory resources are reused.
+	 */
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+			/*
+			 * The extended IOCB format can only fit 3 BDE or a BPL.
+			 * This I/O has more than 3 BDE so the 1st data bde will
+			 * be a BPL that is filled in here.
+			 */
+			physaddr = lpfc_cmd->dma_handle;
+			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+			data_bde->tus.f.bdeSize = (num_bde *
+						   sizeof(struct ulp_bde64));
+			physaddr += (sizeof(struct fcp_cmnd) +
+				     sizeof(struct fcp_rsp) +
+				     (2 * sizeof(struct ulp_bde64)));
+			data_bde->addrHigh = putPaddrHigh(physaddr);
+			data_bde->addrLow = putPaddrLow(physaddr);
+			/* ebde count includes the response bde and data bpl */
+			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+		} else {
+			/* ebde count includes the response bde and data bdes */
+			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+		}
+	} else {
+		iocb_cmd->un.fcpi64.bdl.bdeSize =
+			((num_bde + 2) * sizeof(struct ulp_bde64));
+		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+	}
+	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+	return 0;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
+#define BG_ERR_INIT	0x1
+/* Return BG_ERR_TGT if error injection is detected by Target */
+#define BG_ERR_TGT	0x2
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP	0x10
+/**
+ * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
+ * error injection
+ **/
+#define BG_ERR_CHECK	0x20
+
+/**
+ * lpfc_bg_err_inject - Determine if we should inject an error
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @reftag: (out) BlockGuard reference tag for transmitted data
+ * @apptag: (out) BlockGuard application tag for transmitted data
+ * @new_guard: (in) Value to replace CRC with if needed
+ *
+ * Returns BG_ERR_* bit mask or 0 if request ignored
+ **/
+static int
+lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
+{
+	struct scatterlist *sgpe; /* s/g prot entry */
+	struct lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct scsi_dif_tuple *src = NULL;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_rport_data *rdata;
+	uint32_t op = scsi_get_prot_op(sc);
+	uint32_t blksize;
+	uint32_t numblks;
+	sector_t lba;
+	int rc = 0;
+	int blockoff = 0;
+
+	if (op == SCSI_PROT_NORMAL)
+		return 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	lba = scsi_get_lba(sc);
+
+	/* First check if we need to match the LBA */
+	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
+		blksize = lpfc_cmd_blksize(sc);
+		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
+
+		/* Make sure we have the right LBA if one is specified */
+		if ((phba->lpfc_injerr_lba < lba) ||
+			(phba->lpfc_injerr_lba >= (lba + numblks)))
+			return 0;
+		if (sgpe) {
+			blockoff = phba->lpfc_injerr_lba - lba;
+			numblks = sg_dma_len(sgpe) /
+				sizeof(struct scsi_dif_tuple);
+			if (numblks < blockoff)
+				blockoff = numblks;
+		}
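+		/*
+		 * blockoff now indexes the DIF tuple, within the first
+		 * protection s/g segment, that corresponds to the LBA chosen
+		 * for injection, clamped to that segment's length.
+		 */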
+	}
+
+	/* Next check if we need to match the remote NPortID or WWPN */
+	rdata = lpfc_rport_data_from_scsi_device(sc->device);
+	if (rdata && rdata->pnode) {
+		ndlp = rdata->pnode;
+
+		/* Make sure we have the right NPortID if one is specified */
+		if (phba->lpfc_injerr_nportid  &&
+			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+			return 0;
+
+		/*
+		 * Make sure we have the right WWPN if one is specified.
+		 * wwn[0] should be a non-zero NAA in a good WWPN.
+		 */
+		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
+			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+				sizeof(struct lpfc_name)) != 0))
+			return 0;
+	}
+
+	/* Setup a ptr to the protection data if the SCSI host provides it */
+	if (sgpe) {
+		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+		src += blockoff;
+		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+	}
+
+	/* Should we change the Reference Tag */
+	if (reftag) {
+		if (phba->lpfc_injerr_wref_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (src) {
+					/*
+					 * For WRITE_PASS, force the error
+					 * to be sent on the wire. It should
+					 * be detected by the Target.
+					 * If blockoff != 0 error will be
+					 * inserted in middle of the IO.
+					 */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9076 BLKGRD: Injecting reftag error: "
+					"write lba x%lx + x%x oldrefTag x%x\n",
+					(unsigned long)lba, blockoff,
+					be32_to_cpu(src->ref_tag));
+
+					/*
+					 * Save the old ref_tag so we can
+					 * restore it on completion.
+					 */
+					if (lpfc_cmd) {
+						lpfc_cmd->prot_data_type =
+							LPFC_INJERR_REFTAG;
+						lpfc_cmd->prot_data_segment =
+							src;
+						lpfc_cmd->prot_data =
+							src->ref_tag;
+					}
+					src->ref_tag = cpu_to_be32(0xDEADBEEF);
+					phba->lpfc_injerr_wref_cnt--;
+					if (phba->lpfc_injerr_wref_cnt == 0) {
+						phba->lpfc_injerr_nportid = 0;
+						phba->lpfc_injerr_lba =
+							LPFC_INJERR_LBA_OFF;
+						memset(&phba->lpfc_injerr_wwpn,
+						  0, sizeof(struct lpfc_name));
+					}
+					rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the error
+				 * to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				/* DEADBEEF will be the reftag on the wire */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_wref_cnt--;
+				if (phba->lpfc_injerr_wref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+					LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9078 BLKGRD: Injecting reftag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_wref_cnt--;
+				if (phba->lpfc_injerr_wref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9077 BLKGRD: Injecting reftag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rref_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_rref_cnt--;
+				if (phba->lpfc_injerr_rref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9079 BLKGRD: Injecting reftag error: "
+					"read lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+	}
+
+	/* Should we change the Application Tag */
+	if (apptag) {
+		if (phba->lpfc_injerr_wapp_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (src) {
+					/*
+					 * For WRITE_PASS, force the error
+					 * to be sent on the wire. It should
+					 * be detected by the Target.
+					 * If blockoff != 0 error will be
+					 * inserted in middle of the IO.
+					 */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9080 BLKGRD: Injecting apptag error: "
+					"write lba x%lx + x%x oldappTag x%x\n",
+					(unsigned long)lba, blockoff,
+					be16_to_cpu(src->app_tag));
+
+					/*
+					 * Save the old app_tag so we can
+					 * restore it on completion.
+					 */
+					if (lpfc_cmd) {
+						lpfc_cmd->prot_data_type =
+							LPFC_INJERR_APPTAG;
+						lpfc_cmd->prot_data_segment =
+							src;
+						lpfc_cmd->prot_data =
+							src->app_tag;
+					}
+					src->app_tag = cpu_to_be16(0xDEAD);
+					phba->lpfc_injerr_wapp_cnt--;
+					if (phba->lpfc_injerr_wapp_cnt == 0) {
+						phba->lpfc_injerr_nportid = 0;
+						phba->lpfc_injerr_lba =
+							LPFC_INJERR_LBA_OFF;
+						memset(&phba->lpfc_injerr_wwpn,
+						  0, sizeof(struct lpfc_name));
+					}
+					rc = BG_ERR_TGT | BG_ERR_CHECK;
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				/* DEAD will be the apptag on the wire */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_wapp_cnt--;
+				if (phba->lpfc_injerr_wapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0813 BLKGRD: Injecting apptag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_wapp_cnt--;
+				if (phba->lpfc_injerr_wapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0812 BLKGRD: Injecting apptag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rapp_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_rapp_cnt--;
+				if (phba->lpfc_injerr_rapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0814 BLKGRD: Injecting apptag error: "
+					"read lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+	}
+
+
+	/* Should we change the Guard Tag */
+	if (new_guard) {
+		if (phba->lpfc_injerr_wgrd_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				rc = BG_ERR_CHECK;
+				/* Drop thru */
+
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				if (phba->lpfc_injerr_wgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc |= BG_ERR_TGT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0817 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				if (phba->lpfc_injerr_wgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc = BG_ERR_INIT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0816 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rgrd_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				phba->lpfc_injerr_rgrd_cnt--;
+				if (phba->lpfc_injerr_rgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc = BG_ERR_INIT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0818 BLKGRD: Injecting guard error: "
+					"read lba x%lx\n", (unsigned long)lba);
+			}
+		}
+	}
+
+	return rc;
+}
+#endif
+
+/**
+ * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes for a SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint8_t *txop, uint8_t *rxop)
+{
+	uint8_t ret = 0;
+
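+	/*
+	 * The guard on the wire is always a T10 CRC, while the host-side
+	 * guard may be an IP checksum when the CSUM guard type is in use;
+	 * the tx/rx opcodes selected below convert between the two for each
+	 * direction of the protection operation.
+	 */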
+	if (lpfc_cmd_guard_csum(sc)) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CRC_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CRC_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_CRC;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
+					scsi_get_prot_op(sc));
+			ret = 1;
+			break;
+
+		}
+	} else {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CRC_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CRC_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
+					scsi_get_prot_op(sc));
+			ret = 1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_bg_err_opcodes - Redetermine the BlockGuard opcodes for a SCSI command
+ * in order to force a guard tag error.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint8_t *txop, uint8_t *rxop)
+{
+	uint8_t ret = 0;
+
+	if (lpfc_cmd_guard_csum(sc)) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CSUM_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+
+		}
+	} else {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+		}
+	}
+
+	return ret;
+}
+#endif
+
+/**
+ * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIF from
+ * the incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |          PDE_5          |
+ *                                +-------------------------+
+ *                                |          PDE_6          |
+ *                                +-------------------------+
+ *                                |         Data BDE        |
+ *                                +-------------------------+
+ *                                |more Data BDE's ... (opt)|
+ *                                +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct lpfc_pde5 *pde5 = NULL;
+	struct lpfc_pde6 *pde6 = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_bde = 0, status;
+	int datadir = sc->sc_data_direction;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t rc;
+#endif
+	uint32_t checking = 1;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+
+	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command for pde */
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	/* setup PDE5 with what we have */
+	pde5 = (struct lpfc_pde5 *) bpl;
+	memset(pde5, 0, sizeof(struct lpfc_pde5));
+	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+
+	/* Endianness conversion if necessary for PDE5 */
+	pde5->word0 = cpu_to_le32(pde5->word0);
+	pde5->reftag = cpu_to_le32(reftag);
+
+	/* advance bpl and increment bde count */
+	num_bde++;
+	bpl++;
+	pde6 = (struct lpfc_pde6 *) bpl;
+
+	/* setup PDE6 with the rest of the info */
+	memset(pde6, 0, sizeof(struct lpfc_pde6));
+	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+	bf_set(pde6_optx, pde6, txop);
+	bf_set(pde6_oprx, pde6, rxop);
+
+	/*
+	 * We only need to check the data on READs; for WRITEs the
+	 * protection data is automatically generated, not checked.
+	 */
+	if (datadir == DMA_FROM_DEVICE) {
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+			bf_set(pde6_ce, pde6, checking);
+		else
+			bf_set(pde6_ce, pde6, 0);
+
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+			bf_set(pde6_re, pde6, checking);
+		else
+			bf_set(pde6_re, pde6, 0);
+	}
+	bf_set(pde6_ai, pde6, 1);
+	bf_set(pde6_ae, pde6, 0);
+	bf_set(pde6_apptagval, pde6, 0);
+
+	/* Endianness conversion if necessary for PDE6 */
+	pde6->word0 = cpu_to_le32(pde6->word0);
+	pde6->word1 = cpu_to_le32(pde6->word1);
+	pde6->word2 = cpu_to_le32(pde6->word2);
+
+	/* advance bpl and increment bde count */
+	num_bde++;
+	bpl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+		bpl->tus.f.bdeSize = sg_dma_len(sgde);
+		if (datadir == DMA_TO_DEVICE)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+		num_bde++;
+	}
+
+out:
+	return num_bde;
+}
+
+/**
+ * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |          PDE_5          |
+ *                                    +-------------------------+
+ *                                    |          PDE_6          |
+ *                                    +-------------------------+
+ *                                    |      PDE_7 (Prot BDE)   |
+ *                                    +-------------------------+
+ *                                    |        Data BDE         |
+ *                                    +-------------------------+
+ *                                    |more Data BDE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new  prot group  -->    |          PDE_5          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct lpfc_pde5 *pde5 = NULL;
+	struct lpfc_pde6 *pde6 = NULL;
+	struct lpfc_pde7 *pde7 = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int status;
+	int datadir = sc->sc_data_direction;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t rc;
+#endif
+	uint32_t checking = 1;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+	int num_bde = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	split_offset = 0;
+	do {
+		/* Check to see if we ran out of space */
+		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+			return num_bde + 3;
+
+		/* setup PDE5 with what we have */
+		pde5 = (struct lpfc_pde5 *) bpl;
+		memset(pde5, 0, sizeof(struct lpfc_pde5));
+		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+
+		/* Endianness conversion if necessary for PDE5 */
+		pde5->word0 = cpu_to_le32(pde5->word0);
+		pde5->reftag = cpu_to_le32(reftag);
+
+		/* advance bpl and increment bde count */
+		num_bde++;
+		bpl++;
+		pde6 = (struct lpfc_pde6 *) bpl;
+
+		/* setup PDE6 with the rest of the info */
+		memset(pde6, 0, sizeof(struct lpfc_pde6));
+		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+		bf_set(pde6_optx, pde6, txop);
+		bf_set(pde6_oprx, pde6, rxop);
+
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+			bf_set(pde6_ce, pde6, checking);
+		else
+			bf_set(pde6_ce, pde6, 0);
+
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+			bf_set(pde6_re, pde6, checking);
+		else
+			bf_set(pde6_re, pde6, 0);
+
+		bf_set(pde6_ai, pde6, 1);
+		bf_set(pde6_ae, pde6, 0);
+		bf_set(pde6_apptagval, pde6, 0);
+
+		/* Endianness conversion if necessary for PDE6 */
+		pde6->word0 = cpu_to_le32(pde6->word0);
+		pde6->word1 = cpu_to_le32(pde6->word1);
+		pde6->word2 = cpu_to_le32(pde6->word2);
+
+		/* advance bpl and increment bde count */
+		num_bde++;
+		bpl++;
+
+		/* setup the first BDE that points to protection buffer */
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+		/* must be integer multiple of the DIF block length */
+		BUG_ON(protgroup_len % 8);
+
+		pde7 = (struct lpfc_pde7 *) bpl;
+		memset(pde7, 0, sizeof(struct lpfc_pde7));
+		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
+
+		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		/* check if this pde is crossing the 4K boundary; if so split */
+		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
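+		/*
+		 * Illustrative example: a 0x900-byte protection buffer
+		 * starting at offset 0x800 within a 4K page crosses the
+		 * boundary, so only 0x800 bytes (protgroup_remainder) are
+		 * consumed for this group and the remaining 0x100 bytes
+		 * start a new protection group on the next loop iteration.
+		 */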
+
+		num_bde++;
+
+		/* setup BDE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
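+		/*
+		 * Walk the data s/g entries until protgrp_bytes have been
+		 * described. Illustrative example: with a 512-byte blksize
+		 * and a 4096-byte protection group, a 6K data segment
+		 * contributes 4K here and split_offset carries the
+		 * remaining 2K into the next protection group.
+		 */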
+		while (!pgdone) {
+			/* Check to see if we ran out of space */
+			if (num_bde >= phba->cfg_total_seg_cnt)
+				return num_bde + 1;
+
+			if (!sgde) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9065 BLKGRD:%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			bpl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				bpl->tus.f.bdeSize = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+				split_offset += bpl->tus.f.bdeSize;
+			}
+
+			subtotal += bpl->tus.f.bdeSize;
+
+			if (datadir == DMA_TO_DEVICE)
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			else
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+			bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+			num_bde++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+
+		}
+
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			bpl++;
+			continue;
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			bpl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9054 BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+out:
+
+	return num_bde;
+}
+
+/**
+ * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIF from
+ * the incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |         DI_SEED         |
+ *                                +-------------------------+
+ *                                |         Data SGE        |
+ *                                +-------------------------+
+ *                                |more Data SGE's ... (opt)|
+ *                                +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_sge = 0, status;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t rc;
+#endif
+	uint32_t checking = 1;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+
+	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command for pde */
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	/* setup DISEED with what we have */
+	diseed = (struct sli4_sge_diseed *) sgl;
+	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+	/* Endianness conversion if necessary */
+	diseed->ref_tag = cpu_to_le32(reftag);
+	diseed->ref_tag_tran = diseed->ref_tag;
+
+	/*
+	 * We only need to check the data on READs; for WRITEs the
+	 * protection data is automatically generated, not checked.
+	 */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		else
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		else
+			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+	}
+
+	/* setup DISEED with the rest of the info */
+	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+
+	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+	/* Endianness conversion if necessary for DISEED */
+	diseed->word2 = cpu_to_le32(diseed->word2);
+	diseed->word3 = cpu_to_le32(diseed->word3);
+
+	/* advance bpl and increment sge count */
+	num_sge++;
+	sgl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		dma_len = sg_dma_len(sgde);
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+		if ((i + 1) == datasegcnt)
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+		else
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+		sgl->sge_len = cpu_to_le32(dma_len);
+		dma_offset += dma_len;
+
+		sgl++;
+		num_sge++;
+	}
+
+out:
+	return num_sge;
+}
+
+/**
+ * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |         DISEED          |
+ *                                    +-------------------------+
+ *                                    |      DIF (Prot SGE)     |
+ *                                    +-------------------------+
+ *                                    |        Data SGE         |
+ *                                    +-------------------------+
+ *                                    |more Data SGE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new  prot group  -->    |         DISEED          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int status;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+	uint32_t dma_len;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t rc;
+#endif
+	uint32_t checking = 1;
+	uint32_t dma_offset = 0;
+	int num_sge = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	split_offset = 0;
+	do {
+		/* Check to see if we ran out of space */
+		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+			return num_sge + 3;
+
+		/* setup DISEED with what we have */
+		diseed = (struct sli4_sge_diseed *) sgl;
+		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+		/* Endianness conversion if necessary */
+		diseed->ref_tag = cpu_to_le32(reftag);
+		diseed->ref_tag_tran = diseed->ref_tag;
+
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+		} else {
+			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+			/*
+			 * When in this mode, the hardware will replace
+			 * the guard tag from the host with a
+			 * newly generated good CRC for the wire.
+			 * Switch to raw mode here to avoid this
+			 * behavior. What the host sends gets put on the wire.
+			 */
+			if (txop == BG_OP_IN_CRC_OUT_CRC) {
+				txop = BG_OP_RAW_MODE;
+				rxop = BG_OP_RAW_MODE;
+			}
+		}
+
+
+		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
+			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		else
+			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
+		/* setup DISEED with the rest of the info */
+		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+
+		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+		/* Endianness conversion if necessary for DISEED */
+		diseed->word2 = cpu_to_le32(diseed->word2);
+		diseed->word3 = cpu_to_le32(diseed->word3);
+
+		/* advance sgl and increment bde count */
+		num_sge++;
+		sgl++;
+
+		/* set up the DIF SGE that points to the protection buffer */
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+		/* must be an integer multiple of the 8-byte DIF tuple length */
+		BUG_ON(protgroup_len % 8);
+
+		/* Now setup DIF SGE */
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
+		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
+		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
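+		/*
+		 * For illustration, assuming 512-byte blocks: a 64-byte
+		 * protection buffer holds 64 / 8 = 8 DIF tuples, so this
+		 * protection group covers 8 * 512 = 4096 bytes of data.
+		 */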
+
+		/* check if DIF SGE is crossing the 4K boundary; if so split */
+		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
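+		/*
+		 * Illustrative split: if addr_lo ends in 0xf80 and
+		 * protgroup_len is 0x100, only 0x80 bytes (16 tuples) fit
+		 * below the 4K boundary; the remainder is emitted as a new
+		 * DIF SGE on the next pass via protgroup_offset.
+		 */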
+
+		num_sge++;
+
+		/* setup SGE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			/* Check to see if we ran out of space */
+			if (num_sge >= phba->cfg_total_seg_cnt)
+				return num_sge + 1;
+
+			if (!sgde) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9086 BLKGRD:%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			sgl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				dma_len = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				dma_len = protgrp_bytes - subtotal;
+				split_offset += dma_len;
+			}
+
+			subtotal += dma_len;
+
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+			sgl->sge_len = cpu_to_le32(dma_len);
+			dma_offset += dma_len;
+
+			num_sge++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+		}
+
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			sgl++;
+			continue;
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			sgl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9085 BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+
+out:
+
+	return num_sge;
+}
+
+/**
+ * lpfc_prot_group_type - Get the protection group type of a SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ *
+ * Given a SCSI command that supports DIF, determine the composition of the
+ * protection groups involved in setting up the buffer lists
+ *
+ * Returns: Protection group type (with or without DIF)
+ *
+ **/
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+	int ret = LPFC_PG_TYPE_INVALID;
+	unsigned char op = scsi_get_prot_op(sc);
+
+	switch (op) {
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+		ret = LPFC_PG_TYPE_NO_DIF;
+		break;
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+		ret = LPFC_PG_TYPE_DIF_BUF;
+		break;
+	default:
+		if (phba)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9021 Unsupported protection op:%d\n",
+					op);
+		break;
+	}
+	return ret;
+}
+
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+		       struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+	int fcpdl;
+
+	fcpdl = scsi_bufflen(sc);
+
+	/* Check if there is protection data on the wire */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		/* Read check for protection data */
+		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+			return fcpdl;
+
+	} else {
+		/* Write check for protection data */
+		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+			return fcpdl;
+	}
+
+	/*
+	 * If we are in DIF Type 1 mode every data block has an 8 byte
+	 * DIF (trailer) attached to it. We must adjust the FCP data
+	 * length to account for the protection data.
+	 */
+	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
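+	/*
+	 * Worked example (illustrative): an 8192-byte transfer with
+	 * 512-byte blocks spans 16 blocks, so 16 * 8 = 128 bytes of DIF
+	 * are added and the wire length becomes 8320 bytes.
+	 */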
+
+	return fcpdl;
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int fcpdl;
+	struct lpfc_vport *vport = phba->pport;
+
+	/*
+	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
+	 *  fcp_rsp regions to the first data bde entry
+	 */
+	bpl += 2;
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_sglist(scsi_cmnd),
+					scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		lpfc_cmd->seg_cnt = datasegcnt;
+
+		/* First check if data segment count from SCSI Layer is good */
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+			goto err;
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+
+			/* Here we need to add a PDE5 and PDE6 to the count */
+			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+				goto err;
+
+			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+
+		case LPFC_PG_TYPE_DIF_BUF:
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so that needs to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+
+			/*
+			 * There is a minimum of 4 BPLs used for every
+			 * protection data segment.
+			 */
+			if ((lpfc_cmd->prot_seg_cnt * 4) >
+			    (phba->cfg_total_seg_cnt - 2))
+				goto err;
+
+			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if ((num_bde < 3) ||
+			    (num_bde > phba->cfg_total_seg_cnt))
+				goto err;
+			break;
+
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			scsi_dma_unmap(scsi_cmnd);
+			lpfc_cmd->seg_cnt = 0;
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9022 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
+	 * reinitialized since all iocb memory resources are used many times
+	 * for transmit, receive, and continuation bpl's.
+	 */
+	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
+	iocb_cmd->ulpBdeCount = 1;
+	iocb_cmd->ulpLe = 1;
+
+	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
+	/*
+	 * For First burst, we may need to adjust the initial transfer
+	 * length for DIF
+	 */
+	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+	    (fcpdl < vport->cfg_first_burst_size))
+		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
+	return 0;
+err:
+	if (lpfc_cmd->seg_cnt)
+		scsi_dma_unmap(scsi_cmnd);
+	if (lpfc_cmd->prot_seg_cnt)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+			     scsi_prot_sg_count(scsi_cmnd),
+			     scsi_cmnd->sc_data_direction);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9023 Cannot setup S/G List for HBA"
+			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+			prot_group_type, num_bde);
+
+	lpfc_cmd->seg_cnt = 0;
+	lpfc_cmd->prot_seg_cnt = 0;
+	return 1;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the CRC algorithm
+ * provided by crc_t10dif.
+ */
+static uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+	uint16_t crc = 0;
+	uint16_t x;
+
+	crc = crc_t10dif(data, count);
+	x = cpu_to_be16(crc);
+	return x;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the IP checksum algorithm
+ * provided by ip_compute_csum.
+ */
+static uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+	uint16_t ret;
+
+	ret = ip_compute_csum(data, count);
+	return ret;
+}
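+
+/*
+ * Which guard computation applies is chosen per command via
+ * lpfc_cmd_guard_csum() in lpfc_calc_bg_err() below: the IP checksum
+ * when the host supplies checksum guards, the T10 CRC otherwise.
+ */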
+
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
+static void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scatterlist *sgpe; /* s/g prot entry */
+	struct scatterlist *sgde; /* s/g data entry */
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	struct scsi_dif_tuple *src = NULL;
+	uint8_t *data_src = NULL;
+	uint16_t guard_tag;
+	uint16_t start_app_tag, app_tag;
+	uint32_t start_ref_tag, ref_tag;
+	int prot, protsegcnt;
+	int err_type, len, data_len;
+	int chk_ref, chk_app, chk_guard;
+	uint16_t sum;
+	unsigned blksize;
+
+	err_type = BGS_GUARD_ERR_MASK;
+	sum = 0;
+	guard_tag = 0;
+
+	/* First check to see if there is protection data to examine */
+	prot = scsi_get_prot_op(cmd);
+	if ((prot == SCSI_PROT_READ_STRIP) ||
+	    (prot == SCSI_PROT_WRITE_INSERT) ||
+	    (prot == SCSI_PROT_NORMAL))
+		goto out;
+
+	/* Currently the driver just supports ref_tag and guard_tag checking */
+	chk_ref = 1;
+	chk_app = 0;
+	chk_guard = 0;
+
+	/* Set up a pointer to the protection data provided by the SCSI host */
+	sgpe = scsi_prot_sglist(cmd);
+	protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+	if (sgpe && protsegcnt) {
+
+		/*
+		 * We will only try to verify the guard tag if the segment
+		 * data length is a multiple of the blksize.
+		 */
+		sgde = scsi_sglist(cmd);
+		blksize = lpfc_cmd_blksize(cmd);
+		data_src = (uint8_t *)sg_virt(sgde);
+		data_len = sgde->length;
+		if ((data_len & (blksize - 1)) == 0)
+			chk_guard = 1;
+
+		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+		start_app_tag = src->app_tag;
+		len = sgpe->length;
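+		/*
+		 * Walk the 8-byte DIF tuples (2-byte guard tag, 2-byte app
+		 * tag, 4-byte ref tag) and the data blocks in lockstep, one
+		 * tuple per block, to pinpoint which check failed.
+		 */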
+		while (src && protsegcnt) {
+			while (len) {
+
+				/*
+				 * First check to see if a protection data
+				 * check is valid
+				 */
+				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
+				    (src->app_tag == T10_PI_APP_ESCAPE)) {
+					start_ref_tag++;
+					goto skipit;
+				}
+
+				/* First Guard Tag checking */
+				if (chk_guard) {
+					guard_tag = src->guard_tag;
+					if (lpfc_cmd_guard_csum(cmd))
+						sum = lpfc_bg_csum(data_src,
+								   blksize);
+					else
+						sum = lpfc_bg_crc(data_src,
+								  blksize);
+					if (guard_tag != sum) {
+						err_type = BGS_GUARD_ERR_MASK;
+						goto out;
+					}
+				}
+
+				/* Reference Tag checking */
+				ref_tag = be32_to_cpu(src->ref_tag);
+				if (chk_ref && (ref_tag != start_ref_tag)) {
+					err_type = BGS_REFTAG_ERR_MASK;
+					goto out;
+				}
+				start_ref_tag++;
+
+				/* App Tag checking */
+				app_tag = src->app_tag;
+				if (chk_app && (app_tag != start_app_tag)) {
+					err_type = BGS_APPTAG_ERR_MASK;
+					goto out;
+				}
+skipit:
+				len -= sizeof(struct scsi_dif_tuple);
+				if (len < 0)
+					len = 0;
+				src++;
+
+				data_src += blksize;
+				data_len -= blksize;
+
+				/*
+				 * Are we at the end of the Data segment?
+				 * The data segment is only used for Guard
+				 * tag checking.
+				 */
+				if (chk_guard && (data_len == 0)) {
+					chk_guard = 0;
+					sgde = sg_next(sgde);
+					if (!sgde)
+						goto out;
+
+					data_src = (uint8_t *)sg_virt(sgde);
+					data_len = sgde->length;
+					if ((data_len & (blksize - 1)) == 0)
+						chk_guard = 1;
+				}
+			}
+
+			/* Go to the next protection data segment */
+			sgpe = sg_next(sgpe);
+			if (sgpe) {
+				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+				len = sgpe->length;
+			} else {
+				src = NULL;
+			}
+			protsegcnt--;
+		}
+	}
+out:
+	if (err_type == BGS_GUARD_ERR_MASK) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+					0x10, 0x1);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+		phba->bg_guard_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+				(unsigned long)scsi_get_lba(cmd),
+				sum, guard_tag);
+
+	} else if (err_type == BGS_REFTAG_ERR_MASK) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+					0x10, 0x3);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_reftag_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+				(unsigned long)scsi_get_lba(cmd),
+				ref_tag, start_ref_tag);
+
+	} else if (err_type == BGS_APPTAG_ERR_MASK) {
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+					0x10, 0x2);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_apptag_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+				(unsigned long)scsi_get_lba(cmd),
+				app_tag, start_app_tag);
+	}
+}
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA.  In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ *  0 - No error found
+ *  1 - BlockGuard error found
+ * -1 - Internal error (bad profile, ...etc)
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+			struct lpfc_iocbq *pIocbOut)
+{
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+	int ret = 0;
+	uint32_t bghm = bgf->bghm;
+	uint32_t bgstat = bgf->bgstat;
+	uint64_t failing_sector = 0;
+
+	spin_lock(&_dump_buf_lock);
+	if (!_dump_buf_done) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
+			" Data for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+		lpfc_debug_save_data(phba, cmd);
+
+		/* If we have a prot sgl, save the DIF buffer */
+		if (lpfc_prot_group_type(phba, cmd) ==
+				LPFC_PG_TYPE_DIF_BUF) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
+				"Saving DIF for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+			lpfc_debug_save_dif(phba, cmd);
+		}
+
+		_dump_buf_done = 1;
+	}
+	spin_unlock(&_dump_buf_lock);
+
+	if (lpfc_bgs_get_invalid_prof(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9072 BLKGRD: Invalid BG Profile in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_guard_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x1);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+		phba->bg_guard_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9055 BLKGRD: Guard Tag error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
+	}
+
+	if (lpfc_bgs_get_reftag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x3);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_reftag_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9056 BLKGRD: Ref Tag error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
+	}
+
+	if (lpfc_bgs_get_apptag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x2);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_apptag_err_cnt++;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9061 BLKGRD: App Tag error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
+	}
+
+	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+		/*
+		 * set up sense data descriptor 0 per SPC-4 as an information
+		 * field, and put the failing LBA in it.
+		 * This code assumes there was also a guard/app/ref tag error
+		 * indication.
+		 */
+		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
+		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
+		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
+		cmd->sense_buffer[10] = 0x80; /* Validity bit */
+
+		/* bghm is an "on the wire", FC frame based byte count */
+		switch (scsi_get_prot_op(cmd)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			bghm /= cmd->device->sector_size;
+			break;
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			bghm /= (cmd->device->sector_size +
+				sizeof(struct scsi_dif_tuple));
+			break;
+		}
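+		/*
+		 * Illustrative numbers, assuming 512-byte sectors: when no
+		 * DIF travels on the wire each block counts 512 bytes; when
+		 * DIF is carried each block counts 512 + 8 = 520 bytes,
+		 * hence the two divisors above.
+		 */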
+
+		failing_sector = scsi_get_lba(cmd);
+		failing_sector += bghm;
+
+		/* Descriptor Information */
+		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
+	}
+
+	if (!ret) {
+		/* No error was reported - problem in FW? */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+				"9057 BLKGRD: Unknown error in cmd"
+				" 0x%x lba 0x%llx blk cnt 0x%x "
+				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+				(unsigned long long)scsi_get_lba(cmd),
+				blk_rq_sectors(cmd->request), bgstat, bghm);
+
+		/* Calculate what type of error it was */
+		lpfc_calc_bg_err(phba, lpfc_cmd);
+	}
+out:
+	return ret;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct scatterlist *sgel = NULL;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	struct sli4_sge *first_data_sgl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+	int nseg;
+	struct ulp_bde64 *bde;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+
+		nseg = scsi_dma_map(scsi_cmnd);
+		if (unlikely(nseg <= 0))
+			return 1;
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl += 1;
+		first_data_sgl = sgl;
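+		/*
+		 * At this point SGE 0 maps fcp_cmnd and SGE 1 maps fcp_rsp;
+		 * the data SGEs built below start at index 2.
+		 */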
+		lpfc_cmd->seg_cnt = nseg;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
+				" %s: Too many sg segments from "
+				"dma_map_sg.  Config %d, seg_cnt %d\n",
+				__func__, phba->cfg_sg_seg_cnt,
+			       lpfc_cmd->seg_cnt);
+			lpfc_cmd->seg_cnt = 0;
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single scsi command.  Just run through the seg_cnt and format
+		 * the sge's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
+		 */
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+			physaddr = sg_dma_address(sgel);
+			dma_len = sg_dma_len(sgel);
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			if ((num_bde + 1) == nseg)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len = cpu_to_le32(dma_len);
+			dma_offset += dma_len;
+			sgl++;
+		}
+		/* setup the performance hint (first data BDE) if enabled */
+		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+			bde = (struct ulp_bde64 *)
+					&(iocb_cmd->unsli3.sli3Words[5]);
+			bde->addrLow = first_data_sgl->addr_lo;
+			bde->addrHigh = first_data_sgl->addr_hi;
+			bde->tus.f.bdeSize =
+					le32_to_cpu(first_data_sgl->sge_len);
+			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			bde->tus.w = cpu_to_le32(bde->tus.w);
+		}
+	} else {
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized since all iocb memory resources are
+	 * reused.
+	 */
+	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+
+	/*
+	 * If the OAS driver feature is enabled and the lun is enabled for
+	 * OAS, set the oas iocb related flags.
+	 */
+	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
+		scsi_cmnd->device->hostdata)->oas_enabled) {
+		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
+			scsi_cmnd->device->hostdata)->priority;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_sge = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int fcpdl;
+	struct lpfc_vport *vport = phba->pport;
+
+	/*
+	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
+	 *  fcp_rsp regions to the first data sge entry
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_sglist(scsi_cmnd),
+					scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		sgl += 1;
+		lpfc_cmd->seg_cnt = datasegcnt;
+
+		/* First check if data segment count from SCSI Layer is good */
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+			goto err;
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			/* Here we need to add a DISEED to the count */
+			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+				goto err;
+
+			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+					datasegcnt);
+
+			/* we should have 2 or more entries in buffer list */
+			if (num_sge < 2)
+				goto err;
+			break;
+
+		case LPFC_PG_TYPE_DIF_BUF:
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so that needs to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			/*
+			 * There is a minimum of 3 SGEs used for every
+			 * protection data segment.
+			 */
+			if ((lpfc_cmd->prot_seg_cnt * 3) >
+			    (phba->cfg_total_seg_cnt - 2))
+				goto err;
+
+			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+					datasegcnt, protsegcnt);
+
+			/* we should have 3 or more entries in buffer list */
+			if ((num_sge < 3) ||
+			    (num_sge > phba->cfg_total_seg_cnt))
+				goto err;
+			break;
+
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			scsi_dma_unmap(scsi_cmnd);
+			lpfc_cmd->seg_cnt = 0;
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9083 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
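+	/*
+	 * Record the DIF operation class on the iocb so that later WQE
+	 * setup can tell strip, insert and pass-through apart.
+	 */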
+	switch (scsi_get_prot_op(scsi_cmnd)) {
+	case SCSI_PROT_WRITE_STRIP:
+	case SCSI_PROT_READ_STRIP:
+		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+	case SCSI_PROT_READ_INSERT:
+		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+		break;
+	case SCSI_PROT_WRITE_PASS:
+	case SCSI_PROT_READ_PASS:
+		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+		break;
+	}
+
+	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
+	/*
+	 * For First burst, we may need to adjust the initial transfer
+	 * length for DIF
+	 */
+	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+	    (fcpdl < vport->cfg_first_burst_size))
+		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
+	/*
+	 * If the OAS driver feature is enabled and the lun is enabled for
+	 * OAS, set the oas iocb related flags.
+	 */
+	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
+		scsi_cmnd->device->hostdata)->oas_enabled)
+		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+
+	return 0;
+err:
+	if (lpfc_cmd->seg_cnt)
+		scsi_dma_unmap(scsi_cmnd);
+	if (lpfc_cmd->prot_seg_cnt)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+			     scsi_prot_sg_count(scsi_cmnd),
+			     scsi_cmnd->sc_data_direction);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9084 Cannot setup S/G List for HBA"
+			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+			prot_group_type, num_sge);
+
+	lpfc_cmd->seg_cnt = 0;
+	lpfc_cmd->prot_seg_cnt = 0;
+	return 1;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * using BlockGuard.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static inline int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when there is a SCSI command reporting
+ * an error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t resp_info = fcprsp->rspStatus2;
+	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	struct lpfc_fast_path_event *fast_path_evt = NULL;
+	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+	unsigned long flags;
+
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
+	/* If there is queuefull or busy condition send a scsi event */
+	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+		(cmnd->result == SAM_STAT_BUSY)) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.scsi_evt.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.scsi_evt.subcategory =
+		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+			LPFC_EVENT_CHECK_COND;
+		fast_path_evt->un.check_cond_evt.scsi_event.lun =
+			cmnd->device->lun;
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.check_cond_evt.sense_key =
+			cmnd->sense_buffer[2] & 0xf;
+		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+		     fcpi_parm &&
+		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+			((scsi_status == SAM_STAT_GOOD) &&
+			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
+		/*
+		 * If fcpi_parm is valid and either the residual does not
+		 * match it, or the status is good with no residual flags
+		 * set, then there is a read check error
+		 */
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.read_check_error.header.event_type =
+			FC_REG_FABRIC_EVENT;
+		fast_path_evt->un.read_check_error.header.subcategory =
+			LPFC_EVENT_FCPRDCHKERR;
+		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+		fast_path_evt->un.read_check_error.fcpiparam =
+			fcpi_parm;
+	} else
+		return;
+
+	fast_path_evt->vport = vport;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
+ * @phba: The HBA for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter gather list of the scsi
+ * command field of @psb for a device with the SLI-3 interface spec.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	/*
+	 * There are only two special cases to consider.  (1) the scsi command
+	 * requested scatter-gather usage or (2) the scsi command allocated
+	 * a request buffer, but did not request use_sg.  There is a third
+	 * case, but it does not require resource deallocation.
+	 */
+	if (psb->seg_cnt > 0)
+		scsi_dma_unmap(psb->pCmd);
+	if (psb->prot_seg_cnt > 0)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+				scsi_prot_sg_count(psb->pCmd),
+				psb->pCmd->sc_data_direction);
+}
+
+/**
+ * lpfc_handle_fcp_err - FCP response handler
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains FCP error.
+ *
+ * This routine is called to process response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
+ * based upon SCSI and FCP error.
+ **/
+static void
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		    struct lpfc_iocbq *rsp_iocb)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	uint32_t resp_info = fcprsp->rspStatus2;
+	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t *lp;
+	uint32_t host_status = DID_OK;
+	uint32_t rsplen = 0;
+	uint32_t fcpDl;
+	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
+
+	/*
+	 *  If this is a task management command, there is no
+	 *  scsi packet associated with this lpfc_cmd.  The driver
+	 *  consumes it.
+	 */
+	if (fcpcmd->fcpCntl2) {
+		scsi_status = 0;
+		goto out;
+	}
+
+	if (resp_info & RSP_LEN_VALID) {
+		rsplen = be32_to_cpu(fcprsp->rspRspLen);
+		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "2719 Invalid response length: "
+				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
+				 cmnd->device->id,
+				 cmnd->device->lun, cmnd->cmnd[0],
+				 rsplen);
+			host_status = DID_ERROR;
+			goto out;
+		}
+		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "2757 Protocol failure detected during "
+				 "processing of FCP I/O op: "
+				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
+				 cmnd->device->id,
+				 cmnd->device->lun, cmnd->cmnd[0],
+				 fcprsp->rspInfo3);
+			host_status = DID_ERROR;
+			goto out;
+		}
+	}
+
+	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+		if (snslen > SCSI_SENSE_BUFFERSIZE)
+			snslen = SCSI_SENSE_BUFFERSIZE;
+
+		if (resp_info & RSP_LEN_VALID)
+			rsplen = be32_to_cpu(fcprsp->rspRspLen);
+		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+	}
+	lp = (uint32_t *)cmnd->sense_buffer;
+
+	/* special handling for under run conditions */
+	if (!scsi_status && (resp_info & RESID_UNDER)) {
+		/* don't log underruns just because LOG_FCP is set... */
+		if (vport->cfg_log_verbose & LOG_FCP)
+			logit = LOG_FCP_ERROR;
+		/* ...unless the operator explicitly asks for them */
+		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
+			logit = LOG_FCP_UNDER;
+	}
+
+	lpfc_printf_vlog(vport, KERN_WARNING, logit,
+			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 cmnd->cmnd[0], scsi_status,
+			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
+			 be32_to_cpu(fcprsp->rspResId),
+			 be32_to_cpu(fcprsp->rspSnsLen),
+			 be32_to_cpu(fcprsp->rspRspLen),
+			 fcprsp->rspInfo3);
+
+	scsi_set_resid(cmnd, 0);
+	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
+	if (resp_info & RESID_UNDER) {
+		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
+				 "9025 FCP Read Underrun, expected %d, "
+				 "residual %d Data: x%x x%x x%x\n",
+				 fcpDl,
+				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+				 cmnd->underflow);
+
+		/*
+		 * If there is an under run check if under run reported by
+		 * storage array is same as the under run reported by HBA.
+		 * If this is not same, there is a dropped frame.
+		 */
+		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+			fcpi_parm &&
+			(scsi_get_resid(cmnd) != fcpi_parm)) {
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_FCP | LOG_FCP_ERROR,
+					 "9026 FCP Read Check Error "
+					 "and Underrun Data: x%x x%x x%x x%x\n",
+					 fcpDl,
+					 scsi_get_resid(cmnd), fcpi_parm,
+					 cmnd->cmnd[0]);
+			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+			host_status = DID_ERROR;
+		}
+		/*
+		 * The cmnd->underflow is the minimum number of bytes that must
+		 * be transferred for this command.  Provided a sense condition
+		 * is not present, make sure the actual amount transferred is at
+		 * least the underflow value or fail.
+		 */
+		if (!(resp_info & SNS_LEN_VALID) &&
+		    (scsi_status == SAM_STAT_GOOD) &&
+		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+		     < cmnd->underflow)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+					 "9027 FCP command x%x residual "
+					 "underrun converted to error "
+					 "Data: x%x x%x x%x\n",
+					 cmnd->cmnd[0], scsi_bufflen(cmnd),
+					 scsi_get_resid(cmnd), cmnd->underflow);
+			host_status = DID_ERROR;
+		}
+	} else if (resp_info & RESID_OVER) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "9028 FCP command x%x residual overrun error. "
+				 "Data: x%x x%x\n", cmnd->cmnd[0],
+				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+		host_status = DID_ERROR;
+
+	/*
+	 * Check SLI validation that all the transfer was actually done
+	 * (fcpi_parm should be zero). Apply check only to reads.
+	 */
+	} else if (fcpi_parm) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
+				 "9029 FCP %s Check Error xri x%x  Data: "
+				 "x%x x%x x%x x%x x%x\n",
+				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
+				 "Read" : "Write"),
+				 ((phba->sli_rev == LPFC_SLI_REV4) ?
+				 lpfc_cmd->cur_iocbq.sli4_xritag :
+				 rsp_iocb->iocb.ulpContext),
+				 fcpDl, be32_to_cpu(fcprsp->rspResId),
+				 fcpi_parm, cmnd->cmnd[0], scsi_status);
+
+		/* There is some issue with the LPe12000 that causes it
+		 * to miscalculate the fcpi_parm and falsely trip this
+		 * recovery logic.  Detect this case and don't error when true.
+		 */
+		if (fcpi_parm > fcpDl)
+			goto out;
+
+		switch (scsi_status) {
+		case SAM_STAT_GOOD:
+		case SAM_STAT_CHECK_CONDITION:
+			/* Fabric dropped a data frame. Fail any successful
+			 * command in which we detected dropped frames.
+			 * A status of good or some check conditions could
+			 * be considered a successful command.
+			 */
+			host_status = DID_ERROR;
+			break;
+		}
+		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+	}
+
+ out:
+	cmnd->result = ScsiResult(host_status, scsi_status);
+	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
+}
+
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ * @lpfc_cmd: Pointer to the scsi buffer whose command is being distributed.
+ *
+ * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
+ * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
+ * If scsi-mq is enabled, get the default block layer mapping of software
+ * queues to hardware queues.  This information is saved in the request tag.
+ *
+ * Return: index of the SLI4 fast-path FCP queue.
+ **/
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+				  struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct lpfc_vector_map_info *cpup;
+	int chann, cpu;
+	uint32_t tag;
+	uint16_t hwq;
+
+	if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
+		tag = blk_mq_unique_tag(cmnd->request);
+		hwq = blk_mq_unique_tag_to_hwq(tag);
+
+		return hwq;
+	}
+
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+	    && phba->cfg_fcp_io_channel > 1) {
+		cpu = smp_processor_id();
+		if (cpu < phba->sli4_hba.num_present_cpu) {
+			cpup = phba->sli4_hba.cpu_map;
+			cpup += cpu;
+			return cpup->channel_id;
+		}
+	}
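+	/*
+	 * Fall back to simple round-robin: the shared counter cycles the
+	 * returned index through 0 .. cfg_fcp_io_channel - 1 across
+	 * successive commands.
+	 */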
+	chann = atomic_add_return(1, &phba->fcp_qidx);
+	chann = chann % phba->cfg_fcp_io_channel;
+	return chann;
+}
+
+/**
+ * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns scsi command result by looking into response IOCB
+ * status field appropriately. This routine handles QUEUE FULL condition as
+ * well by ramping down device queue depth.
+ **/
+static void
+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+			struct lpfc_iocbq *pIocbOut)
+{
+	struct lpfc_scsi_buf *lpfc_cmd =
+		(struct lpfc_scsi_buf *) pIocbIn->context1;
+	struct lpfc_vport      *vport = pIocbIn->vport;
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct scsi_cmnd *cmd;
+	int depth;
+	unsigned long flags;
+	struct lpfc_fast_path_event *fast_path_evt;
+	struct Scsi_Host *shost;
+	uint32_t logit = LOG_FCP;
+
+	atomic_inc(&phba->fc4ScsiIoCmpls);
+
+	/* Sanity check on return of outstanding command */
+	cmd = lpfc_cmd->pCmd;
+	if (!cmd)
+		return;
+	shost = cmd->device->host;
+
+	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
+	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+	/* pick up SLI4 exchange busy status from HBA */
+	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (lpfc_cmd->prot_data_type) {
+		struct scsi_dif_tuple *src = NULL;
+
+		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+		/*
+		 * Used to restore any changes to protection
+		 * data for error injection.
+		 */
+		switch (lpfc_cmd->prot_data_type) {
+		case LPFC_INJERR_REFTAG:
+			src->ref_tag =
+				lpfc_cmd->prot_data;
+			break;
+		case LPFC_INJERR_APPTAG:
+			src->app_tag =
+				(uint16_t)lpfc_cmd->prot_data;
+			break;
+		case LPFC_INJERR_GUARD:
+			src->guard_tag =
+				(uint16_t)lpfc_cmd->prot_data;
+			break;
+		default:
+			break;
+		}
+
+		lpfc_cmd->prot_data = 0;
+		lpfc_cmd->prot_data_type = 0;
+		lpfc_cmd->prot_data_segment = NULL;
+	}
+#endif
+
+	if (pnode && NLP_CHK_NODE_ACT(pnode))
+		atomic_dec(&pnode->cmd_pending);
+
+	if (lpfc_cmd->status) {
+		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+		    (lpfc_cmd->result & IOERR_DRVR_MASK))
+			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+		else if (lpfc_cmd->status >= IOSTAT_CNT)
+			lpfc_cmd->status = IOSTAT_DEFAULT;
+		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
+		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
+			logit = 0;
+		else
+			logit = LOG_FCP | LOG_FCP_UNDER;
+		lpfc_printf_vlog(vport, KERN_WARNING, logit,
+			 "9030 FCP cmd x%x failed <%d/%lld> "
+			 "status: x%x result: x%x "
+			 "sid: x%x did: x%x oxid: x%x "
+			 "Data: x%x x%x\n",
+			 cmd->cmnd[0],
+			 cmd->device ? cmd->device->id : 0xffff,
+			 cmd->device ? cmd->device->lun : 0xffff,
+			 lpfc_cmd->status, lpfc_cmd->result,
+			 vport->fc_myDID,
+			 (pnode) ? pnode->nlp_DID : 0,
+			 phba->sli_rev == LPFC_SLI_REV4 ?
+			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+			 pIocbOut->iocb.ulpContext,
+			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+
+		switch (lpfc_cmd->status) {
+		case IOSTAT_FCP_RSP_ERROR:
+			/* Call FCP RSP handler to determine result */
+			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
+			break;
+		case IOSTAT_NPORT_BSY:
+		case IOSTAT_FABRIC_BSY:
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+			fast_path_evt = lpfc_alloc_fast_evt(phba);
+			if (!fast_path_evt)
+				break;
+			fast_path_evt->un.fabric_evt.event_type =
+				FC_REG_FABRIC_EVENT;
+			fast_path_evt->un.fabric_evt.subcategory =
+				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+					&pnode->nlp_portname,
+					sizeof(struct lpfc_name));
+				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+					&pnode->nlp_nodename,
+					sizeof(struct lpfc_name));
+			}
+			fast_path_evt->vport = vport;
+			fast_path_evt->work_evt.evt =
+				LPFC_EVT_FASTPATH_MGMT_EVT;
+			spin_lock_irqsave(&phba->hbalock, flags);
+			list_add_tail(&fast_path_evt->work_evt.evt_listp,
+				&phba->work_list);
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			lpfc_worker_wake_up(phba);
+			break;
+		case IOSTAT_LOCAL_REJECT:
+		case IOSTAT_REMOTE_STOP:
+			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+			    lpfc_cmd->result ==
+					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+			    lpfc_cmd->result ==
+					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+				break;
+			}
+			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
+			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
+				cmd->result = ScsiResult(DID_REQUEUE, 0);
+				break;
+			}
+			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+					/*
+					 * This is a response for a BG enabled
+					 * cmd. Parse BG error
+					 */
+					lpfc_parse_bg_err(phba, lpfc_cmd,
+							pIocbOut);
+					break;
+				} else {
+					lpfc_printf_vlog(vport, KERN_WARNING,
+							LOG_BG,
+							"9031 non-zero BGSTAT "
+							"on unprotected cmd\n");
+				}
+			}
+			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+				&& (phba->sli_rev == LPFC_SLI_REV4)
+				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
+				/* This IO was aborted by the target; we don't
+				 * know the rxid, and because we did not send
+				 * the ABTS we cannot generate an RRQ.
+				 */
+				lpfc_set_rrq_active(phba, pnode,
+					lpfc_cmd->cur_iocbq.sli4_lxritag,
+					0, 0);
+			}
+		/* else: fall through */
+		default:
+			cmd->result = ScsiResult(DID_ERROR, 0);
+			break;
+		}
+
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
+		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+						 SAM_STAT_BUSY);
+	} else
+		cmd->result = ScsiResult(DID_OK, 0);
+
+	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "0710 Iodone <%d/%llu> cmd %p, error "
+				 "x%x SNS x%x x%x Data: x%x x%x\n",
+				 cmd->device->id, cmd->device->lun, cmd,
+				 cmd->result, *lp, *(lp + 3), cmd->retries,
+				 scsi_get_resid(cmd));
+	}
+
+	lpfc_update_stats(phba, lpfc_cmd);
+	if (vport->cfg_max_scsicmpl_time &&
+	   time_after(jiffies, lpfc_cmd->start_time +
+		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+			if (pnode->cmd_qdepth >
+				atomic_read(&pnode->cmd_pending) &&
+				(atomic_read(&pnode->cmd_pending) >
+				LPFC_MIN_TGT_QDEPTH) &&
+				((cmd->cmnd[0] == READ_10) ||
+				(cmd->cmnd[0] == WRITE_10)))
+				pnode->cmd_qdepth =
+					atomic_read(&pnode->cmd_pending);
+
+			pnode->last_change_time = jiffies;
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
+	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
+		   time_after(jiffies, pnode->last_change_time +
+			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+			spin_lock_irqsave(shost->host_lock, flags);
+			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
+				/ 100;
+			depth = depth ? depth : 1;
+			pnode->cmd_qdepth += depth;
+			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
+				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
+			pnode->last_change_time = jiffies;
+			spin_unlock_irqrestore(shost->host_lock, flags);
+		}
+	}
+
+	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+
+	/* If pCmd was set to NULL from abort path, do not call scsi_done */
+	if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "5688 FCP cmd already NULL, sid: 0x%06x, "
+				 "did: 0x%06x, oxid: 0x%04x\n",
+				 vport->fc_myDID,
+				 (pnode) ? pnode->nlp_DID : 0,
+				 phba->sli_rev == LPFC_SLI_REV4 ?
+				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
+		return;
+	}
+
+	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
+	cmd->scsi_done(cmd);
+
+	/*
+	 * If there is a thread waiting for command completion
+	 * wake up the thread.
+	 */
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (lpfc_cmd->waitq)
+		wake_up(lpfc_cmd->waitq);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+	int i, j;
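+
+	/*
+	 * sizeof(struct fcp_cmnd) is a multiple of 4, so a word-wise
+	 * byte swap copies the structure exactly.
+	 */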
+	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+	     i += sizeof(uint32_t), j++) {
+		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+	}
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI3 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		    struct lpfc_nodelist *pnode)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+	int datadir = scsi_cmnd->sc_data_direction;
+	uint8_t *ptr;
+	bool sli4;
+	uint32_t fcpdl;
+
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
+	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+	/* clear task management bits */
+	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
+
+	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+			&lpfc_cmd->fcp_cmnd->fcp_lun);
+
+	ptr = &fcp_cmnd->fcpCdb[0];
+	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+		ptr += scsi_cmnd->cmd_len;
+		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+	}
+
+	fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+
+	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		if (datadir == DMA_TO_DEVICE) {
+			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+			iocb_cmd->ulpPU = PARM_READ_CHECK;
+			if (vport->cfg_first_burst_size &&
+			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
+				fcpdl = scsi_bufflen(scsi_cmnd);
+				if (fcpdl < vport->cfg_first_burst_size)
+					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+				else
+					piocbq->iocb.un.fcpi.fcpi_XRdy =
+						vport->cfg_first_burst_size;
+			}
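+			/*
+			 * e.g. (illustrative): with a first burst size of
+			 * 2048, a 1024-byte write sets fcpi_XRdy to 1024
+			 * while an 8192-byte write caps it at 2048.
+			 */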
+			fcp_cmnd->fcpCntl3 = WRITE_DATA;
+			atomic_inc(&phba->fc4ScsiOutputRequests);
+		} else {
+			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+			iocb_cmd->ulpPU = PARM_READ_CHECK;
+			fcp_cmnd->fcpCntl3 = READ_DATA;
+			atomic_inc(&phba->fc4ScsiInputRequests);
+		}
+	} else {
+		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
+		iocb_cmd->un.fcpi.fcpi_parm = 0;
+		iocb_cmd->ulpPU = 0;
+		fcp_cmnd->fcpCntl3 = 0;
+		atomic_inc(&phba->fc4ScsiControlRequests);
+	}
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+	/*
+	 * Finish initializing those IOCB fields that are independent
+	 * of the scsi_cmnd request_buffer
+	 */
+	piocbq->iocb.ulpContext = pnode->nlp_rpi;
+	if (sli4)
+		piocbq->iocb.ulpContext =
+		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
+	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+		piocbq->iocb.ulpFCP2Rcvy = 1;
+	else
+		piocbq->iocb.ulpFCP2Rcvy = 0;
+
+	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
+	piocbq->context1  = lpfc_cmd;
+	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+	piocbq->vport = vport;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates the FCP information unit corresponding to @task_mgmt_cmd
+ * for a device with the SLI-3 interface spec.
+ *
+ * Return codes:
+ *   0 - Error
+ *   1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+			     struct lpfc_scsi_buf *lpfc_cmd,
+			     uint64_t lun,
+			     uint8_t task_mgmt_cmd)
+{
+	struct lpfc_iocbq *piocbq;
+	IOCB_t *piocb;
+	struct fcp_cmnd *fcp_cmnd;
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+		return 0;
+
+	piocbq = &(lpfc_cmd->cur_iocbq);
+	piocbq->vport = vport;
+
+	piocb = &piocbq->iocb;
+
+	fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	/* Clear out any old data in the FCP command area */
+	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
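+	/* fcpCntl2 carries the task management flags, e.g. FCP_LUN_RESET */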
+	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+	if (vport->phba->sli_rev == 3 &&
+	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
+	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
+	piocb->ulpContext = ndlp->nlp_rpi;
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+		piocb->ulpContext =
+		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+	}
+	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
+	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+	piocb->ulpPU = 0;
+	piocb->un.fcpi.fcpi_parm = 0;
+
+	/* ulpTimeout is only one byte */
+	if (lpfc_cmd->timeout > 0xff) {
+		/*
+		 * Do not timeout the command at the firmware level.
+		 * The driver will provide the timeout mechanism.
+		 */
+		piocb->ulpTimeout = 0;
+	} else
+		piocb->ulpTimeout = lpfc_cmd->timeout;
+
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+
+	return 1;
+}
+
+/**
+ * lpfc_scsi_api_table_setup - Set up scsi api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SCSI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
+	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
+
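+	/*
+	 * Select the SLI-revision specific entry points: LPFC_PCI_DEV_LP
+	 * uses the SLI-3 (_s3) routines, LPFC_PCI_DEV_OC the SLI-4 (_s4)
+	 * routines.
+	 */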
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1418 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+	}
+	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+	return 0;
+}
+
+/**
+ * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is the IOCB completion routine for the device reset and target
+ * reset routines. It releases the scsi buffer associated with lpfc_cmd.
+ **/
+static void
+lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct lpfc_scsi_buf *lpfc_cmd =
+		(struct lpfc_scsi_buf *) cmdiocbq->context1;
+	if (lpfc_cmd)
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+	return;
+}
+
+/**
+ * lpfc_info - Info entry point of scsi_host_template data structure
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about the hba.
+ *
+ * Return code:
+ *   Pointer to char - Success.
+ **/
+const char *
+lpfc_info(struct Scsi_Host *host)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int len, link_speed = 0;
+	static char lpfcinfobuf[384];
+
+	memset(lpfcinfobuf, 0, 384);
+	if (phba && phba->pcidev) {
+		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
+		len = strlen(lpfcinfobuf);
+		snprintf(lpfcinfobuf + len,
+			384 - len,
+			" on PCI bus %02x device %02x irq %d",
+			phba->pcidev->bus->number,
+			phba->pcidev->devfn,
+			phba->pcidev->irq);
+		len = strlen(lpfcinfobuf);
+		if (phba->Port[0]) {
+			snprintf(lpfcinfobuf + len,
+				 384 - len,
+				 " port %s",
+				 phba->Port);
+		}
+		len = strlen(lpfcinfobuf);
+		link_speed = lpfc_sli_port_speed_get(phba);
+		if (link_speed != 0)
+			snprintf(lpfcinfobuf + len, 384 - len,
+				 " Logical Link Speed: %d Mbps", link_speed);
+	}
+	return lpfcinfobuf;
+}
+
+/**
+ * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine rearms the fcp_poll timer of @phba to expire cfg_poll_tmo
+ * milliseconds from now. The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
+static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
+{
+	unsigned long  poll_tmo_expires =
+		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
+
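+	/* Only rearm while FCP commands remain outstanding on the ring */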
+	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
+		mod_timer(&phba->fcp_poll_timer,
+			  poll_tmo_expires);
+}
+
+/**
+ * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
+void lpfc_poll_start_timer(struct lpfc_hba *phba)
+{
+	lpfc_poll_rearm_timer(phba);
+}
+
+/**
+ * lpfc_poll_timeout - Restart polling timer
+ * @ptr: Pointer to lpfc_hba data structure (cast from unsigned long).
+ *
+ * This routine restarts the fcp_poll timer when FCP ring polling is
+ * enabled and the FCP ring interrupt is disabled.
+ **/
+void lpfc_poll_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+}
+
+/**
+ * lpfc_queuecommand - scsi_host_template queuecommand entry point
+ * @shost: Pointer to Scsi_Host data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * The driver registers this routine with the scsi midlayer to submit a @cmnd
+ * for processing. This routine prepares an IOCB from the scsi command and
+ * provides it to the firmware. The midlayer's done callback is invoked after
+ * the driver has finished processing the command.
+ *
+ * Return value :
+ *   0 - Success
+ *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
+ **/
+static int
+lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	int err;
+
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+	err = fc_remote_port_chkready(rport);
+	if (err) {
+		cmnd->result = err;
+		goto out_fail_command;
+	}
+	ndlp = rdata->pnode;
+
+	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
+		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
+				" op:%02x str=%s without registering for"
+				" BlockGuard - Rejecting command\n",
+				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+				dif_op_str[scsi_get_prot_op(cmnd)]);
+		goto out_fail_command;
+	}
+
+	/*
+	 * Catch race where our node has transitioned, but the
+	 * transport is still transitioning.
+	 */
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		goto out_tgt_busy;
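+	/* Per-target throttle: busy the target once its queue depth is reached */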
+	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+		goto out_tgt_busy;
+
+	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
+	if (lpfc_cmd == NULL) {
+		lpfc_rampdown_queue_depth(phba);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
+				 "0707 driver's buffer pool is empty, "
+				 "IO busied\n");
+		goto out_host_busy;
+	}
+
+	/*
+	 * Store the midlayer's command structure for the completion phase
+	 * and complete the command initialization.
+	 */
+	lpfc_cmd->pCmd  = cmnd;
+	lpfc_cmd->rdata = rdata;
+	lpfc_cmd->timeout = 0;
+	lpfc_cmd->start_time = jiffies;
+	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+
+	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+		if (vport->phba->cfg_enable_bg) {
+			lpfc_printf_vlog(vport,
+					 KERN_INFO, LOG_SCSI_CMD,
+					 "9033 BLKGRD: rcvd %s cmd:x%x "
+					 "sector x%llx cnt %u pt %x\n",
+					 dif_op_str[scsi_get_prot_op(cmnd)],
+					 cmnd->cmnd[0],
+					 (unsigned long long)scsi_get_lba(cmnd),
+					 blk_rq_sectors(cmnd->request),
+					 (cmnd->cmnd[1]>>5));
+		}
+		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+	} else {
+		if (vport->phba->cfg_enable_bg) {
+			lpfc_printf_vlog(vport,
+					 KERN_INFO, LOG_SCSI_CMD,
+					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
+					 "x%x sector x%llx cnt %u pt %x\n",
+					 cmnd->cmnd[0],
+					 (unsigned long long)scsi_get_lba(cmnd),
+					 blk_rq_sectors(cmnd->request),
+					 (cmnd->cmnd[1]>>5));
+		}
+		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+	}
+
+	if (err)
+		goto out_host_busy_free_buf;
+
+	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+
+	atomic_inc(&ndlp->cmd_pending);
+	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+	if (err) {
+		atomic_dec(&ndlp->cmd_pending);
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "3376 FCP could not issue IOCB err %x "
+				 "FCP cmd x%x <%d/%llu> "
+				 "sid: x%x did: x%x oxid: x%x "
+				 "Data: x%x x%x x%x x%x\n",
+				 err, cmnd->cmnd[0],
+				 cmnd->device ? cmnd->device->id : 0xffff,
+				 cmnd->device ? cmnd->device->lun : (u64) -1,
+				 vport->fc_myDID, ndlp->nlp_DID,
+				 phba->sli_rev == LPFC_SLI_REV4 ?
+				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
+				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
+				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
+				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
+				 (uint32_t)
+				 (cmnd->request->timeout / 1000));
+
+		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
+		case WRITE_DATA:
+			atomic_dec(&phba->fc4ScsiOutputRequests);
+			break;
+		case READ_DATA:
+			atomic_dec(&phba->fc4ScsiInputRequests);
+			break;
+		default:
+			atomic_dec(&phba->fc4ScsiControlRequests);
+		}
+		goto out_host_busy_free_buf;
+	}
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
+	return 0;
+
+ out_host_busy_free_buf:
+	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+ out_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+ out_tgt_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
+ out_fail_command:
+	cmnd->scsi_done(cmnd);
+	return 0;
+}
+
+
+/**
+ * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine aborts @cmnd pending in base driver.
+ *
+ * Return code :
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
+static int
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *iocb;
+	struct lpfc_iocbq *abtsiocb;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	IOCB_t *cmd, *icmd;
+	int ret = SUCCESS, status = 0;
+	struct lpfc_sli_ring *pring_s4;
+	int ret_val;
+	unsigned long flags, iflags;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+	status = fc_block_scsi_eh(cmnd);
+	if (status != 0 && status != SUCCESS)
+		return status;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	/* driver queued commands are in process of being flushed */
+	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			"3168 SCSI Layer abort requested I/O has been "
+			"flushed by LLD.\n");
+		return FAILED;
+	}
+
+	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
+	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
+			 "x%x ID %d LUN %llu\n",
+			 SUCCESS, cmnd->device->id, cmnd->device->lun);
+		return SUCCESS;
+	}
+
+	iocb = &lpfc_cmd->cur_iocbq;
+	/* the command is in the process of being cancelled */
+	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			"3169 SCSI Layer abort requested I/O has been "
+			"cancelled by LLD.\n");
+		return FAILED;
+	}
+	/*
+	 * If pCmd field of the corresponding lpfc_scsi_buf structure
+	 * points to a different SCSI command, then the driver has
+	 * already completed this command, but the midlayer did not
+	 * see the completion before the eh fired. Just return SUCCESS.
+	 */
+	if (lpfc_cmd->pCmd != cmnd) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			"3170 SCSI Layer abort requested I/O has been "
+			"completed by LLD.\n");
+		goto out_unlock;
+	}
+
+	BUG_ON(iocb->context1 != lpfc_cmd);
+
+	/* abort issued in recovery is still in progress */
+	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			 "3389 SCSI Layer I/O Abort Request is pending\n");
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		goto wait_for_cmpl;
+	}
+
+	abtsiocb = __lpfc_sli_get_iocbq(phba);
+	if (abtsiocb == NULL) {
+		ret = FAILED;
+		goto out_unlock;
+	}
+
+	/* Indicate the IO is being aborted by the driver. */
+	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+	/*
+	 * The scsi command cannot be in the txq and is in flight because
+	 * pCmd is still pointing at the SCSI command we have to abort. There
+	 * is no need to search the txcmplq. Just send an abort to the FW.
+	 */
+
+	cmd = &iocb->iocb;
+	icmd = &abtsiocb->iocb;
+	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+	icmd->un.acxri.abortContextTag = cmd->ulpContext;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
+	else
+		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+	icmd->ulpLe = 1;
+	icmd->ulpClass = cmd->ulpClass;
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocb->hba_wqidx = iocb->hba_wqidx;
+	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+	if (iocb->iocb_flag & LPFC_IO_FOF)
+		abtsiocb->iocb_flag |= LPFC_IO_FOF;
+
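+	/*
+	 * With the link up an ABTS can be sent on the wire; with the
+	 * link down, CLOSE_XRI just reclaims the exchange locally.
+	 */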
+	if (lpfc_is_link_up(phba))
+		icmd->ulpCommand = CMD_ABORT_XRI_CN;
+	else
+		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+
+	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+	abtsiocb->vport = vport;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
+		if (pring_s4 == NULL) {
+			ret = FAILED;
+			goto out_unlock;
+		}
+		/* Note: both hbalock and ring_lock must be held here */
+		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+						abtsiocb, 0);
+		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+	} else {
+		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+						abtsiocb, 0);
+	}
+	/* no longer need the lock after this point */
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	if (ret_val == IOCB_ERROR) {
+		lpfc_sli_release_iocbq(phba, abtsiocb);
+		ret = FAILED;
+		goto out;
+	}
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+wait_for_cmpl:
+	lpfc_cmd->waitq = &waitq;
+	/* Wait for abort to complete */
+	wait_event_timeout(waitq,
+			  (lpfc_cmd->pCmd != cmnd),
+			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	lpfc_cmd->waitq = NULL;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	if (lpfc_cmd->pCmd == cmnd) {
+		ret = FAILED;
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0748 abort handler timed out waiting "
+				 "for aborting I/O (xri:x%x) to complete: "
+				 "ret %#x, ID %d, LUN %llu\n",
+				 iocb->sli4_xritag, ret,
+				 cmnd->device->id, cmnd->device->lun);
+	}
+	goto out;
+
+out_unlock:
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+out:
+	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
+			 "LUN %llu\n", ret, cmnd->device->id,
+			 cmnd->device->lun);
+	return ret;
+}
+
+static char *
+lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
+{
+	switch (task_mgmt_cmd) {
+	case FCP_ABORT_TASK_SET:
+		return "ABORT_TASK_SET";
+	case FCP_CLEAR_TASK_SET:
+		return "FCP_CLEAR_TASK_SET";
+	case FCP_BUS_RESET:
+		return "FCP_BUS_RESET";
+	case FCP_LUN_RESET:
+		return "FCP_LUN_RESET";
+	case FCP_TARGET_RESET:
+		return "FCP_TARGET_RESET";
+	case FCP_CLEAR_ACA:
+		return "FCP_CLEAR_ACA";
+	case FCP_TERMINATE_TASK:
+		return "FCP_TERMINATE_TASK";
+	default:
+		return "unknown";
+	}
+}
+
+
+/**
+ * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ *
+ * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
+ *
+ * Return code :
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
+static int
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t rsp_info;
+	uint32_t rsp_len;
+	uint8_t  rsp_info_code;
+	int ret = FAILED;
+
+	if (fcprsp == NULL)
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "0703 fcp_rsp is missing\n");
+	else {
+		rsp_info = fcprsp->rspStatus2;
+		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
+		rsp_info_code = fcprsp->rspInfo3;
+
+		lpfc_printf_vlog(vport, KERN_INFO,
+				 LOG_FCP,
+				 "0706 fcp_rsp valid 0x%x,"
+				 " rsp len=%d code 0x%x\n",
+				 rsp_info,
+				 rsp_len, rsp_info_code);
+
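+		/* Only an 8-byte FCP_RSP_INFO with a valid length is trusted */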
+		if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+			switch (rsp_info_code) {
+			case RSP_NO_FAILURE:
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0715 Task Mgmt No Failure\n");
+				ret = SUCCESS;
+				break;
+			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0716 Task Mgmt Target "
+						"reject\n");
+				break;
+			case RSP_TM_NOT_COMPLETED: /* TM failed */
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0717 Task Mgmt Target "
+						"failed TM\n");
+				break;
+			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+						 "0718 Task Mgmt to invalid "
+						"LUN\n");
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+
+/**
+ * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
+ * @vport: The virtual port for which this call is being executed.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @tgt_id: Target ID of remote device.
+ * @lun_id: Lun number for the TMF
+ * @task_mgmt_cmd: type of TMF to send
+ *
+ * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
+ * a remote port.
+ *
+ * Return Code:
+ *   0x2003 - Error
+ *   0x2002 - Success.
+ **/
+static int
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+		   unsigned int tgt_id, uint64_t lun_id,
+		   uint8_t task_mgmt_cmd)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_iocbq *iocbqrsp;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
+	int ret;
+	int status;
+
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+		return FAILED;
+	pnode = rdata->pnode;
+
+	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
+	if (lpfc_cmd == NULL)
+		return FAILED;
+	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
+	lpfc_cmd->rdata = rdata;
+	lpfc_cmd->pCmd = cmnd;
+
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+					   task_mgmt_cmd);
+	if (!status) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
+
+	iocbq = &lpfc_cmd->cur_iocbq;
+	iocbqrsp = lpfc_sli_get_iocbq(phba);
+	if (iocbqrsp == NULL) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
+	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			 "0702 Issue %s to TGT %d LUN %llu "
+			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
+			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
+			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+			 iocbq->iocb_flag);
+
+	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
+					  iocbq, iocbqrsp, lpfc_cmd->timeout);
+	if ((status != IOCB_SUCCESS) ||
+	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
+		if (status != IOCB_SUCCESS ||
+		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+					 "0727 TMF %s to TGT %d LUN %llu "
+					 "failed (%d, %d) iocb_flag x%x\n",
+					 lpfc_taskmgmt_name(task_mgmt_cmd),
+					 tgt_id, lun_id,
+					 iocbqrsp->iocb.ulpStatus,
+					 iocbqrsp->iocb.un.ulpWord[4],
+					 iocbq->iocb_flag);
+		/* status == IOCB_SUCCESS here implies ulpStatus != IOSTAT_SUCCESS */
+		if (status == IOCB_SUCCESS) {
+			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+				/* Something in the FCP_RSP was invalid.
+				 * Check conditions */
+				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
+			else
+				ret = FAILED;
+		} else if (status == IOCB_TIMEDOUT) {
+			ret = TIMEOUT_ERROR;
+		} else {
+			ret = FAILED;
+		}
+	} else
+		ret = SUCCESS;
+
+	lpfc_sli_release_iocbq(phba, iocbqrsp);
+
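+	/*
+	 * On IOCB_TIMEDOUT the completion handler (lpfc_tskmgmt_def_cmpl)
+	 * still owns lpfc_cmd and will release it when the iocb finally
+	 * completes; release it here otherwise.
+	 */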
+	if (ret != TIMEOUT_ERROR)
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+
+	return ret;
+}
+
+/**
+ * lpfc_chk_tgt_mapped - Check whether the scsi target is mapped
+ * @vport: The virtual port to check on
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine delays until the scsi target (aka rport) for the
+ * command exists (is present and logged in) or we declare it non-existent.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
+	unsigned long later;
+
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	/*
+	 * If target is not in a MAPPED state, delay until
+	 * target is rediscovered or devloss timeout expires.
+	 */
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies)) {
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+			return FAILED;
+		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
+			return SUCCESS;
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+		if (!rdata)
+			return FAILED;
+		pnode = rdata->pnode;
+	}
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
+	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+		return FAILED;
+	return SUCCESS;
+}
+
+/**
+ * lpfc_reset_flush_io_context - Flush orphaned I/O contexts after a reset TMF
+ * @vport: The virtual port (scsi_host) for the flush context
+ * @tgt_id: If aborting by Target context - specifies the target id
+ * @lun_id: If aborting by Lun context - specifies the lun id
+ * @context: specifies the context level to flush at.
+ *
+ * After a reset condition via TMF, we need to flush orphaned i/o
+ * contexts from the adapter. This routine aborts any contexts
+ * outstanding, then waits for their completions. The wait is
+ * bounded by devloss_tmo though.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+			uint64_t lun_id, lpfc_ctx_cmd context)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned long later;
+	int cnt;
+
+	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+	if (cnt)
+		lpfc_sli_abort_taskmgmt(vport,
+					&phba->sli.sli3_ring[LPFC_FCP_RING],
+					tgt_id, lun_id, context);
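+	/* Poll every 20ms, bounded by 2 * devloss_tmo, for the aborts to drain */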
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+	}
+	if (cnt) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0724 I/O flush failure for context %s : cnt x%x\n",
+			((context == LPFC_CTX_LUN) ? "LUN" :
+			 ((context == LPFC_CTX_TGT) ? "TGT" :
+			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
+			cnt);
+		return FAILED;
+	}
+	return SUCCESS;
+}
+
+/**
+ * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a LUN_RESET task management
+ * command.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
+	unsigned tgt_id = cmnd->device->id;
+	uint64_t lun_id = cmnd->device->lun;
+	struct lpfc_scsi_event_header scsi_event;
+	int status;
+
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+	if (!rdata || !rdata->pnode) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0798 Device Reset rport failure: rdata x%p\n",
+				 rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	status = fc_block_scsi_eh(cmnd);
+	if (status != 0 && status != SUCCESS)
+		return status;
+
+	status = lpfc_chk_tgt_mapped(vport, cmnd);
+	if (status == FAILED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0721 Device Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
+	scsi_event.lun = lun_id;
+	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+						FCP_LUN_RESET);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0713 SCSI layer issued Device Reset (%d, %llu) "
+			 "return x%x\n", tgt_id, lun_id, status);
+
+	/*
+	 * We have to clean up the i/o: it may be orphaned by the TMF;
+	 * or if the TMF failed, it may be in an indeterminate state.
+	 * So, continue on.
+	 * We will report success if all the i/o aborts successfully.
+	 */
+	if (status == SUCCESS)
+		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+						LPFC_CTX_LUN);
+
+	return status;
+}
+
+/**
+ * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
+	unsigned tgt_id = cmnd->device->id;
+	uint64_t lun_id = cmnd->device->lun;
+	struct lpfc_scsi_event_header scsi_event;
+	int status;
+
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0799 Target Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	status = fc_block_scsi_eh(cmnd);
+	if (status != 0 && status != SUCCESS)
+		return status;
+
+	status = lpfc_chk_tgt_mapped(vport, cmnd);
+	if (status == FAILED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0722 Target Reset rport failure: rdata x%p\n", rdata);
+		if (pnode) {
+			spin_lock_irq(shost->host_lock);
+			pnode->nlp_flag &= ~NLP_NPR_ADISC;
+			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+			spin_unlock_irq(shost->host_lock);
+		}
+		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+					  LPFC_CTX_TGT);
+		return FAST_IO_FAIL;
+	}
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+					FCP_TARGET_RESET);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0723 SCSI layer issued Target Reset (%d, %llu) "
+			 "return x%x\n", tgt_id, lun_id, status);
+
+	/*
+	 * We have to clean up the i/o: it may be orphaned by the TMF;
+	 * or if the TMF failed, it may be in an indeterminate state.
+	 * So, continue on.
+	 * We will report success if all the i/o aborts successfully.
+	 */
+	if (status == SUCCESS)
+		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+					  LPFC_CTX_TGT);
+	return status;
+}
+
+/**
+ * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset on every target on @cmnd->device->host,
+ * emulating parallel SCSI bus reset semantics.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct lpfc_scsi_event_header scsi_event;
+	int match;
+	int ret = SUCCESS, status, i;
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+	status = fc_block_scsi_eh(cmnd);
+	if (status != 0 && status != SUCCESS)
+		return status;
+
+	/*
+	 * Since the driver manages a single bus device, reset all
+	 * targets known to the driver.  Should any target reset
+	 * fail, this routine returns failure to the midlayer.
+	 */
+	for (i = 0; i < LPFC_MAX_TARGET; i++) {
+		/* Search for mapped node by target ID */
+		match = 0;
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			if (vport->phba->cfg_fcp2_no_tgt_reset &&
+			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+				continue;
+			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+			    ndlp->nlp_sid == i &&
+			    ndlp->rport &&
+			    ndlp->nlp_type & NLP_FCP_TARGET) {
+				match = 1;
+				break;
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+		if (!match)
+			continue;
+
+		status = lpfc_send_taskmgmt(vport, cmnd,
+					i, 0, FCP_TARGET_RESET);
+
+		if (status != SUCCESS) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+					 "0700 Bus Reset on target %d failed\n",
+					 i);
+			ret = FAILED;
+		}
+	}
+	/*
+	 * We have to clean up the i/o: it may be orphaned by the TMFs
+	 * above; or if any of the TMFs failed, it may be in an
+	 * indeterminate state.
+	 * We will report success if all the i/o aborts successfully.
+	 */
+
+	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
+	if (status != SUCCESS)
+		ret = FAILED;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
+	return ret;
+}
+
+/**
+ * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a host reset of the adapter port. It brings the HBA
+ * offline, performs a board restart, and then brings the board back online.
+ * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
+ * all outstanding SCSI commands on the host, returning errors back to the
+ * SCSI mid-level. As this is the SCSI mid-level's last resort for error
+ * handling, it only returns error if resetting the adapter is not
+ * successful; in all other cases it returns success.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int rc, ret = SUCCESS;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "3172 SCSI layer issued Host Reset Data:\n");
+
+	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+	lpfc_offline(phba);
+	rc = lpfc_sli_brdrestart(phba);
+	if (rc)
+		ret = FAILED;
+	rc = lpfc_online(phba);
+	if (rc)
+		ret = FAILED;
+	lpfc_unblock_mgmt_io(phba);
+
+	if (ret == FAILED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "3323 Failed host reset, bring it offline\n");
+		lpfc_sli4_offline_eratt(phba);
+	}
+	return ret;
+}
+
+/**
+ * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
+ * globally available list of scsi buffers. It also makes sure no more scsi
+ * buffers are allocated than the HBA limit conveyed to the midlayer. This
+ * list of scsi buffers exists for the lifetime of the driver.
+ *
+ * Return codes:
+ *   non-0 - Error
+ *   0 - Success
+ **/
+static int
+lpfc_slave_alloc(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	uint32_t total = 0;
+	uint32_t num_to_alloc = 0;
+	int num_allocated = 0;
+	uint32_t sdev_cnt;
+	struct lpfc_device_data *device_data;
+	unsigned long flags;
+	struct lpfc_name target_wwpn;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	if (phba->cfg_fof) {
+
+		/*
+		 * Check to see if the device data structure for the lun
+		 * exists.  If not, create one.
+		 */
+
+		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
+		spin_lock_irqsave(&phba->devicelock, flags);
+		device_data = __lpfc_get_device_data(phba,
+						     &phba->luns,
+						     &vport->fc_portname,
+						     &target_wwpn,
+						     sdev->lun);
+		if (!device_data) {
+			spin_unlock_irqrestore(&phba->devicelock, flags);
+			device_data = lpfc_create_device_data(phba,
+							&vport->fc_portname,
+							&target_wwpn,
+							sdev->lun,
+							phba->cfg_XLanePriority,
+							true);
+			if (!device_data)
+				return -ENOMEM;
+			spin_lock_irqsave(&phba->devicelock, flags);
+			list_add_tail(&device_data->listentry, &phba->luns);
+		}
+		device_data->rport_data = rport->dd_data;
+		device_data->available = true;
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		sdev->hostdata = device_data;
+	} else {
+		sdev->hostdata = rport->dd_data;
+	}
+	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
+
+	/*
+	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
+	 * available list of scsi buffers.  Don't allocate more than the
+	 * HBA limit conveyed to the midlayer via the host structure.  The
+	 * formula accounts for the lun_queue_depth + error handlers + 1
+	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
+	 */
+	total = phba->total_scsi_bufs;
+	num_to_alloc = vport->cfg_lun_queue_depth + 2;
+
+	/* If allocated buffers are enough do nothing */
+	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
+		return 0;
+
+	/* Allow some exchanges to be available always to complete discovery */
+	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0704 At limitation of %d preallocated "
+				 "command buffers\n", total);
+		return 0;
+	/* Allow some exchanges to be available always to complete discovery */
+	} else if (total + num_to_alloc >
+		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0705 Allocation request of %d "
+				 "command buffers will exceed max of %d.  "
+				 "Reducing allocation request to %d.\n",
+				 num_to_alloc, phba->cfg_hba_queue_depth,
+				 (phba->cfg_hba_queue_depth - total));
+		num_to_alloc = phba->cfg_hba_queue_depth - total;
+	}
+	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+	if (num_to_alloc != num_allocated) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0708 Allocation request of %d "
+				 "command buffers did not succeed.  "
+				 "Allocated %d buffers.\n",
+				 num_to_alloc, num_allocated);
+	}
+	if (num_allocated > 0)
+		phba->total_scsi_bufs += num_allocated;
+	return 0;
+}
+
+/**
+ * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine configures the following items:
+ *   - Tag command queuing support for @sdev if supported.
+ *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
+ *
+ * Return codes:
+ *   0 - Success
+ **/
+static int
+lpfc_slave_configure(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine sets the @sdev hostdata field to null.
+ **/
+static void
+lpfc_slave_destroy(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_device_data *device_data = sdev->hostdata;
+
+	atomic_dec(&phba->sdev_cnt);
+	if ((phba->cfg_fof) && (device_data)) {
+		spin_lock_irqsave(&phba->devicelock, flags);
+		device_data->available = false;
+		if (!device_data->oas_enabled)
+			lpfc_delete_device_data(phba, device_data);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+	}
+	sdev->hostdata = NULL;
+	return;
+}
+
+/**
+ * lpfc_create_device_data - creates and initializes device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ * @pri: Priority of the lun
+ * @atomic_create: Flag to indicate if memory should be allocated using the
+ *		  GFP_ATOMIC flag or not.
+ *
+ * This routine creates a device data structure which will contain identifying
+ * information for the device (host wwpn, target wwpn, lun), state of OAS,
+ * whether or not the corresponding lun is available to the system,
+ * and pointer to the rport data.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+			struct lpfc_name *target_wwpn, uint64_t lun,
+			uint32_t pri, bool atomic_create)
+{
+
+	struct lpfc_device_data *lun_info;
+	int memory_flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
+	    !(phba->cfg_fof))
+		return NULL;
+
+	/* Attempt to create the device data to contain lun info */
+
+	if (atomic_create)
+		memory_flags = GFP_ATOMIC;
+	else
+		memory_flags = GFP_KERNEL;
+	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
+	if (!lun_info)
+		return NULL;
+	INIT_LIST_HEAD(&lun_info->listentry);
+	lun_info->rport_data  = NULL;
+	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
+	       sizeof(struct lpfc_name));
+	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
+	       sizeof(struct lpfc_name));
+	lun_info->device_id.lun = lun;
+	lun_info->oas_enabled = false;
+	lun_info->priority = pri;
+	lun_info->available = false;
+	return lun_info;
+}
+
+/**
+ * lpfc_delete_device_data - frees a device data structure for OAS
+ * @phba: Pointer to host bus adapter structure.
+ * @lun_info: Pointer to device data structure to free.
+ *
+ * This routine frees the previously allocated device data structure passed.
+ *
+ **/
+void
+lpfc_delete_device_data(struct lpfc_hba *phba,
+			struct lpfc_device_data *lun_info)
+{
+
+	if (unlikely(!phba) || !lun_info  ||
+	    !(phba->cfg_fof))
+		return;
+
+	if (!list_empty(&lun_info->listentry))
+		list_del(&lun_info->listentry);
+	mempool_free(lun_info, phba->device_data_mem_pool);
+	return;
+}
+
+/**
+ * __lpfc_get_device_data - returns the device data for the specified lun
+ * @phba: Pointer to host bus adapter structure.
+ * @list: Pointer to the list to search.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun on target
+ *
+ * This routine searches the list passed for the specified lun's device data.
+ * This function does not hold locks, it is the responsibility of the caller
+ * to ensure the proper lock is held before calling the function.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_device_data - Success
+ **/
+struct lpfc_device_data*
+__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
+		       struct lpfc_name *vport_wwpn,
+		       struct lpfc_name *target_wwpn, uint64_t lun)
+{
+
+	struct lpfc_device_data *lun_info;
+
+	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_fof)
+		return NULL;
+
+	/* Search the list for an entry matching vport wwpn, target wwpn and lun */
+
+	list_for_each_entry(lun_info, list, listentry) {
+		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+			    sizeof(struct lpfc_name)) == 0) &&
+		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+			    sizeof(struct lpfc_name)) == 0) &&
+		    (lun_info->device_id.lun == lun))
+			return lun_info;
+	}
+
+	return NULL;
+}
+
+/**
+ * lpfc_find_next_oas_lun - searches for the next oas lun
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @starting_lun: Pointer to the lun to start searching for
+ * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
+ * @found_target_wwpn: Pointer to the found lun's target wwpn information
+ * @found_lun: Pointer to the found lun.
+ * @found_lun_status: Pointer to status of the found lun.
+ * @found_lun_pri: Pointer to the priority of the found lun.
+ *
+ * This routine searches the luns list for the specified lun
+ * or the first lun for the vport/target.  If the vport wwpn contains
+ * a zero value then a specific vport is not specified. In this case
+ * any vport which contains the lun will be considered a match.  If the
+ * target wwpn contains a zero value then a specific target is not specified.
+ * In this case any target which contains the lun will be considered a
+ * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
+ * are returned.  The function will also return the next lun if available.
+ * If the next lun is not found, starting_lun parameter will be set to
+ * NO_MORE_OAS_LUN.
+ *
+ * Return codes:
+ *   true - lun found
+ *   false - lun not found
+ **/
+bool
+lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
+		       struct lpfc_name *found_vport_wwpn,
+		       struct lpfc_name *found_target_wwpn,
+		       uint64_t *found_lun,
+		       uint32_t *found_lun_status,
+		       uint32_t *found_lun_pri)
+{
+
+	unsigned long flags;
+	struct lpfc_device_data *lun_info;
+	struct lpfc_device_id *device_id;
+	uint64_t lun;
+	bool found = false;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !starting_lun || !found_vport_wwpn ||
+	    !found_target_wwpn || !found_lun || !found_lun_status ||
+	    (*starting_lun == NO_MORE_OAS_LUN) ||
+	    !phba->cfg_fof)
+		return false;
+
+	lun = *starting_lun;
+	*found_lun = NO_MORE_OAS_LUN;
+	*starting_lun = NO_MORE_OAS_LUN;
+
+	/* Search for the lun or the lun closest in value */
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+	list_for_each_entry(lun_info, &phba->luns, listentry) {
+		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
+		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
+			    sizeof(struct lpfc_name)) == 0)) &&
+		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
+		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
+			    sizeof(struct lpfc_name)) == 0)) &&
+		    (lun_info->oas_enabled)) {
+			device_id = &lun_info->device_id;
+			if ((!found) &&
+			    ((lun == FIND_FIRST_OAS_LUN) ||
+			     (device_id->lun == lun))) {
+				*found_lun = device_id->lun;
+				memcpy(found_vport_wwpn,
+				       &device_id->vport_wwpn,
+				       sizeof(struct lpfc_name));
+				memcpy(found_target_wwpn,
+				       &device_id->target_wwpn,
+				       sizeof(struct lpfc_name));
+				if (lun_info->available)
+					*found_lun_status =
+						OAS_LUN_STATUS_EXISTS;
+				else
+					*found_lun_status = 0;
+				*found_lun_pri = lun_info->priority;
+				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
+					memset(vport_wwpn, 0x0,
+					       sizeof(struct lpfc_name));
+				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
+					memset(target_wwpn, 0x0,
+					       sizeof(struct lpfc_name));
+				found = true;
+			} else if (found) {
+				*starting_lun = device_id->lun;
+				memcpy(vport_wwpn, &device_id->vport_wwpn,
+				       sizeof(struct lpfc_name));
+				memcpy(target_wwpn, &device_id->target_wwpn,
+				       sizeof(struct lpfc_name));
+				break;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return found;
+}
+
+/**
+ * lpfc_enable_oas_lun - enables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ * @pri: Priority of the lun
+ *
+ * This routine enables a lun for oas operations.  The routine does so by
+ * doing the following:
+ *
+ *   1) Checks to see if the device data for the lun has been created.
+ *   2) If found, sets the OAS enabled flag if not set and returns.
+ *   3) Otherwise, creates a device data structure.
+ *   4) If successfully created, indicates the device data is for an OAS lun,
+ *   indicates the lun is not available and add to the list of luns.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
+{
+
+	struct lpfc_device_data *lun_info;
+	unsigned long flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_fof)
+		return false;
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+
+	/* Check to see if the device data for the lun has been created */
+	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
+					  target_wwpn, lun);
+	if (lun_info) {
+		if (!lun_info->oas_enabled)
+			lun_info->oas_enabled = true;
+		lun_info->priority = pri;
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+
+	/* Create a lun info structure and add it to the list of luns */
+	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
+					   pri, false);
+	if (lun_info) {
+		lun_info->oas_enabled = true;
+		lun_info->priority = pri;
+		lun_info->available = false;
+		list_add_tail(&lun_info->listentry, &phba->luns);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return false;
+}
+
+/**
+ * lpfc_disable_oas_lun - disables a lun for OAS operations
+ * @phba: Pointer to host bus adapter structure.
+ * @vport_wwpn: Pointer to vport's wwpn information
+ * @target_wwpn: Pointer to target's wwpn information
+ * @lun: Lun
+ * @pri: Priority of the lun
+ *
+ * This routine disables a lun for oas operations.  The routine does so by
+ * doing the following:
+ *
+ *   1) Checks to see if the device data for the lun is created.
+ *   2) If present, clears the flag indicating this lun is for OAS.
+ *   3) If the lun is not available by the system, the device data is
+ *   freed.
+ *
+ * Return codes:
+ *   false - Error
+ *   true - Success
+ **/
+bool
+lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
+		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
+{
+
+	struct lpfc_device_data *lun_info;
+	unsigned long flags;
+
+	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
+	    !phba->cfg_fof)
+		return false;
+
+	spin_lock_irqsave(&phba->devicelock, flags);
+
+	/* Check to see if the lun is available. */
+	lun_info = __lpfc_get_device_data(phba,
+					  &phba->luns, vport_wwpn,
+					  target_wwpn, lun);
+	if (lun_info) {
+		lun_info->oas_enabled = false;
+		lun_info->priority = pri;
+		if (!lun_info->available)
+			lpfc_delete_device_data(phba, lun_info);
+		spin_unlock_irqrestore(&phba->devicelock, flags);
+		return true;
+	}
+
+	spin_unlock_irqrestore(&phba->devicelock, flags);
+	return false;
+}
+
+static int
+lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+{
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static int
+lpfc_no_handler(struct scsi_cmnd *cmnd)
+{
+	return FAILED;
+}
+
+static int
+lpfc_no_slave(struct scsi_device *sdev)
+{
+	return -ENODEV;
+}
+
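+/*
+ * Template used when the port is configured for NVME: the SCSI entry
+ * points are stubbed out so the midlayer cannot issue FCP I/O.
+ */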
+struct scsi_host_template lpfc_template_nvme = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.proc_name		= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_no_command,
+	.eh_abort_handler	= lpfc_no_handler,
+	.eh_device_reset_handler = lpfc_no_handler,
+	.eh_target_reset_handler = lpfc_no_handler,
+	.eh_bus_reset_handler	= lpfc_no_handler,
+	.eh_host_reset_handler  = lpfc_no_handler,
+	.slave_alloc		= lpfc_no_slave,
+	.slave_configure	= lpfc_no_slave,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= 1,
+	.cmd_per_lun		= 1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_hba_attrs,
+	.max_sectors		= 0xFFFF,
+	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.track_queue_depth	= 0,
+};
+
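+/* Identical to lpfc_template except that no host reset handler is provided */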
+struct scsi_host_template lpfc_template_no_hr = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.proc_name		= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
+	.eh_abort_handler	= lpfc_abort_handler,
+	.eh_device_reset_handler = lpfc_device_reset_handler,
+	.eh_target_reset_handler = lpfc_target_reset_handler,
+	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
+	.slave_alloc		= lpfc_slave_alloc,
+	.slave_configure	= lpfc_slave_configure,
+	.slave_destroy		= lpfc_slave_destroy,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
+	.cmd_per_lun		= LPFC_CMD_PER_LUN,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_hba_attrs,
+	.max_sectors		= 0xFFFF,
+	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.change_queue_depth	= scsi_change_queue_depth,
+	.track_queue_depth	= 1,
+};
+
+struct scsi_host_template lpfc_template = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.proc_name		= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
+	.eh_abort_handler	= lpfc_abort_handler,
+	.eh_device_reset_handler = lpfc_device_reset_handler,
+	.eh_target_reset_handler = lpfc_target_reset_handler,
+	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
+	.eh_host_reset_handler  = lpfc_host_reset_handler,
+	.slave_alloc		= lpfc_slave_alloc,
+	.slave_configure	= lpfc_slave_configure,
+	.slave_destroy		= lpfc_slave_destroy,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
+	.cmd_per_lun		= LPFC_CMD_PER_LUN,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_hba_attrs,
+	.max_sectors		= 0xFFFF,
+	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.change_queue_depth	= scsi_change_queue_depth,
+	.track_queue_depth	= 1,
+};
+
+struct scsi_host_template lpfc_vport_template = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.proc_name		= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
+	.eh_abort_handler	= lpfc_abort_handler,
+	.eh_device_reset_handler = lpfc_device_reset_handler,
+	.eh_target_reset_handler = lpfc_target_reset_handler,
+	.slave_alloc		= lpfc_slave_alloc,
+	.slave_configure	= lpfc_slave_configure,
+	.slave_destroy		= lpfc_slave_destroy,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
+	.cmd_per_lun		= LPFC_CMD_PER_LUN,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_vport_attrs,
+	.max_sectors		= 0xFFFF,
+	.change_queue_depth	= scsi_change_queue_depth,
+	.track_queue_depth	= 1,
+};
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_scsi.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_scsi.h
new file mode 100644
index 0000000..5da7e15
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_scsi.h
@@ -0,0 +1,198 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <asm/byteorder.h>
+
+struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
+
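+/* Remove and return the first entry of @list, or set @entry to NULL if empty */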
+#define list_remove_head(list, entry, type, member)		\
+	do {							\
+	entry = NULL;						\
+	if (!list_empty(list)) {				\
+		entry = list_entry((list)->next, type, member);	\
+		list_del_init(&entry->member);			\
+	}							\
+	} while(0)
+
+#define list_get_first(list, type, member)			\
+	(list_empty(list) ? NULL :				\
+	 list_entry((list)->next, type, member))
+
+/* per-port data that is allocated in the FC transport for us */
+struct lpfc_rport_data {
+	struct lpfc_nodelist *pnode;	/* Pointer to the node structure. */
+};
+
+struct lpfc_device_id {
+	struct lpfc_name vport_wwpn;
+	struct lpfc_name target_wwpn;
+	uint64_t lun;
+};
+
+struct lpfc_device_data {
+	struct list_head listentry;
+	struct lpfc_rport_data *rport_data;
+	struct lpfc_device_id device_id;
+	uint8_t priority;
+	bool oas_enabled;
+	bool available;
+};
+
+struct fcp_rsp {
+	uint32_t rspRsvd1;	/* FC Word 0, byte 0:3 */
+	uint32_t rspRsvd2;	/* FC Word 1, byte 0:3 */
+
+	uint8_t rspStatus0;	/* FCP_STATUS byte 0 (reserved) */
+	uint8_t rspStatus1;	/* FCP_STATUS byte 1 (reserved) */
+	uint8_t rspStatus2;	/* FCP_STATUS byte 2 field validity */
+#define RSP_LEN_VALID  0x01	/* bit 0 */
+#define SNS_LEN_VALID  0x02	/* bit 1 */
+#define RESID_OVER     0x04	/* bit 2 */
+#define RESID_UNDER    0x08	/* bit 3 */
+	uint8_t rspStatus3;	/* FCP_STATUS byte 3 SCSI status byte */
+
+	uint32_t rspResId;	/* Residual xfer if residual count field set in
+				   fcpStatus2 */
+	/* Received in Big Endian format */
+	uint32_t rspSnsLen;	/* Length of sense data in fcpSnsInfo */
+	/* Received in Big Endian format */
+	uint32_t rspRspLen;	/* Length of FCP response data in fcpRspInfo */
+	/* Received in Big Endian format */
+
+	uint8_t rspInfo0;	/* FCP_RSP_INFO byte 0 (reserved) */
+	uint8_t rspInfo1;	/* FCP_RSP_INFO byte 1 (reserved) */
+	uint8_t rspInfo2;	/* FCP_RSP_INFO byte 2 (reserved) */
+	uint8_t rspInfo3;	/* FCP_RSP_INFO RSP_CODE byte 3 */
+
+#define RSP_NO_FAILURE       0x00
+#define RSP_DATA_BURST_ERR   0x01
+#define RSP_CMD_FIELD_ERR    0x02
+#define RSP_RO_MISMATCH_ERR  0x03
+#define RSP_TM_NOT_SUPPORTED 0x04	/* Task mgmt function not supported */
+#define RSP_TM_NOT_COMPLETED 0x05	/* Task mgmt function not performed */
+#define RSP_TM_INVALID_LU    0x09	/* Task mgmt function to invalid LU */
+
+	uint32_t rspInfoRsvd;	/* FCP_RSP_INFO bytes 4-7 (reserved) */
+
+	uint8_t rspSnsInfo[128];
+#define SNS_ILLEGAL_REQ 0x05	/* sense key is byte 3 ([2]) */
+#define SNSCOD_BADCMD 0x20	/* sense code is byte 13 ([12]) */
+};
+
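+/*
+ * Sketch (illustrative, variable names assumed): the rspStatus2 validity
+ * bits above gate the big-endian residual/length fields of struct fcp_rsp.
+ *
+ *	if (fcprsp->rspStatus2 & RESID_UNDER)
+ *		resid = be32_to_cpu(fcprsp->rspResId);
+ *	if (fcprsp->rspStatus2 & SNS_LEN_VALID)
+ *		snslen = be32_to_cpu(fcprsp->rspSnsLen);
+ */
+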
+struct fcp_cmnd {
+	struct scsi_lun  fcp_lun;
+
+	uint8_t fcpCntl0;	/* FCP_CNTL byte 0 (reserved) */
+	uint8_t fcpCntl1;	/* FCP_CNTL byte 1 task codes */
+#define  SIMPLE_Q        0x00
+#define  HEAD_OF_Q       0x01
+#define  ORDERED_Q       0x02
+#define  ACA_Q           0x04
+#define  UNTAGGED        0x05
+	uint8_t fcpCntl2;	/* FCP_CTL byte 2 task management codes */
+#define  FCP_ABORT_TASK_SET  0x02	/* Bit 1 */
+#define  FCP_CLEAR_TASK_SET  0x04	/* bit 2 */
+#define  FCP_BUS_RESET       0x08	/* bit 3 */
+#define  FCP_LUN_RESET       0x10	/* bit 4 */
+#define  FCP_TARGET_RESET    0x20	/* bit 5 */
+#define  FCP_CLEAR_ACA       0x40	/* bit 6 */
+#define  FCP_TERMINATE_TASK  0x80	/* bit 7 */
+	uint8_t fcpCntl3;
+#define  WRITE_DATA      0x01	/* Bit 0 */
+#define  READ_DATA       0x02	/* Bit 1 */
+
+	uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
+	uint32_t fcpDl;		/* Total transfer length */
+
+};
+
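+/*
+ * Sketch (illustrative): filling the FCP_CMND control bytes for an I/O.
+ * The "write" flag and length variable are assumptions for the example.
+ *
+ *	fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+ *	fcp_cmnd->fcpCntl3 = write ? WRITE_DATA : READ_DATA;
+ *	fcp_cmnd->fcpDl = cpu_to_be32(xfer_len);
+ */
+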
+struct lpfc_scsicmd_bkt {
+	uint32_t cmd_count;
+};
+
+struct lpfc_scsi_buf {
+	struct list_head list;
+	struct scsi_cmnd *pCmd;
+	struct lpfc_rport_data *rdata;
+
+	uint32_t timeout;
+
+	uint16_t flags;  /* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY         0x1     /* SLI4 hba reported XB on WCQE cmpl */
+	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
+	uint16_t status;	/* From IOCB Word 7- ulpStatus */
+	uint32_t result;	/* From IOCB Word 4. */
+
+	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
+				 * dma_map_sg.  The driver needs this for calls
+				 * to dma_unmap_sg. */
+	uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for protection data */
+
+	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
+
+	/*
+	 * data and dma_handle are the kernel virtual and bus address of the
+	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+	 * gather bde list that supports the sg_tablesize value.
+	 */
+	void *data;
+	dma_addr_t dma_handle;
+
+	struct fcp_cmnd *fcp_cmnd;
+	struct fcp_rsp *fcp_rsp;
+	struct ulp_bde64 *fcp_bpl;
+
+	dma_addr_t dma_phys_bpl;
+
+	/* cur_iocbq has phys of the dma-able buffer.
+	 * Iotag is in here
+	 */
+	struct lpfc_iocbq cur_iocbq;
+	uint16_t cpu;
+
+	wait_queue_head_t *waitq;
+	unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	/* Used to restore any changes to protection data for error injection */
+	void *prot_data_segment;
+	uint32_t prot_data;
+	uint32_t prot_data_type;
+#define	LPFC_INJERR_REFTAG	1
+#define	LPFC_INJERR_APPTAG	2
+#define	LPFC_INJERR_GUARD	3
+#endif
+};
+
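+/*
+ * Layout sketch (an assumption spelled out from the "data" comment above):
+ * fcp_cmnd, fcp_rsp and the BPL are carved out of the single DMA buffer,
+ * e.g.:
+ *
+ *	psb->fcp_cmnd = psb->data;
+ *	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+ *	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+ *			sizeof(struct fcp_rsp);
+ */
+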
+#define LPFC_SCSI_DMA_EXT_SIZE	264
+#define LPFC_BPL_SIZE		1024
+#define MDAC_DIRECT_CMD		0x22
+
+#define FIND_FIRST_OAS_LUN	0
+#define NO_MORE_OAS_LUN		-1
+#define NOT_OAS_ENABLED_LUN	NO_MORE_OAS_LUN
+
+#define TXRDY_PAYLOAD_LEN	12
+
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+				  struct lpfc_scsi_buf *lpfc_cmd);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli.c
new file mode 100644
index 0000000..45445da
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli.c
@@ -0,0 +1,19008 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/lockdep.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+#include <linux/aer.h>
+
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+/* There are only four IOCB completion types. */
+typedef enum _lpfc_iocb_type {
+	LPFC_UNKNOWN_IOCB,
+	LPFC_UNSOL_IOCB,
+	LPFC_SOL_IOCB,
+	LPFC_ABORT_IOCB
+} lpfc_iocb_type;
+
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+				  uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+			      uint8_t *, uint32_t *);
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+							 struct lpfc_iocbq *);
+static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
+				      struct hbq_dmabuf *);
+static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
+					  struct hbq_dmabuf *dmabuf);
+static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
+				    struct lpfc_cqe *);
+static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
+				       int);
+static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+				    struct lpfc_eqe *eqe, uint32_t qidx);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring,
+				   struct lpfc_iocbq *cmdiocb);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+	return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The Work Queue Entry to put on the Work queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If the queue is full it will return -EBUSY, and -ENOMEM if
+ * the queue memory has not been allocated.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+	union lpfc_wqe *temp_wqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+	uint32_t idx;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return -ENOMEM;
+	temp_wqe = q->qe[q->host_index].wqe;
+
+	/* If the host has not yet processed the next entry then we are done */
+	idx = ((q->host_index + 1) % q->entry_count);
+	if (idx == q->hba_index) {
+		q->WQ_overflow++;
+		return -EBUSY;
+	}
+	q->WQ_posted++;
+	/* set consumption flag every once in a while */
+	if (!((q->host_index + 1) % q->entry_repost))
+		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
+	else
+		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
+	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
+	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+	/* ensure WQE bcopy flushed before doorbell write */
+	wmb();
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+
+	q->host_index = idx;
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	if (q->db_format == LPFC_DB_LIST_FORMAT) {
+		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
+		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+	} else {
+		return -EINVAL;
+	}
+	writel(doorbell.word0, q->db_regaddr);
+
+	return 0;
+}
+
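+/*
+ * Usage sketch (illustrative): WQEs are posted under hbalock and a full
+ * queue must be handled by the caller.
+ *
+ *	spin_lock_irqsave(&phba->hbalock, iflags);
+ *	rc = lpfc_sli4_wq_put(wq, &wqe);
+ *	spin_unlock_irqrestore(&phba->hbalock, iflags);
+ *	if (rc == -EBUSY)
+ *		... queue full, hold the request for a later retry ...
+ */
+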
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+	uint32_t released = 0;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
+	if (q->hba_index == index)
+		return 0;
+	do {
+		q->hba_index = ((q->hba_index + 1) % q->entry_count);
+		released++;
+	} while (q->hba_index != index);
+	return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Mailbox Queue Doorbell to signal
+ * the HBA to start processing the Mailbox Queue Entry. This function returns
+ * 0 if successful. If no entries are available on @q then this function
+ * will return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+	struct lpfc_mqe *temp_mqe;
+	struct lpfc_register doorbell;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return -ENOMEM;
+	temp_mqe = q->qe[q->host_index].mqe;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+	/* Save off the mailbox pointer for completion */
+	q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+	/* Update the host index before invoking device */
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+	return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
+	/* Clear the mailbox pointer for completion */
+	q->phba->mbox = NULL;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+	struct lpfc_eqe *eqe;
+	uint32_t idx;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return NULL;
+	eqe = q->qe[q->hba_index].eqe;
+
+	/* If the next EQE is not valid then we are done */
+	if (!bf_get_le32(lpfc_eqe_valid, eqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	idx = ((q->hba_index + 1) % q->entry_count);
+	if (idx == q->host_index)
+		return NULL;
+
+	q->hba_index = idx;
+
+	/*
+	 * insert barrier for instruction interlock : data from the hardware
+	 * must have the valid bit checked before it can be copied and acted
+	 * upon. Speculative instructions were allowing a bcopy at the start
+	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+	 * after our return, to copy data before the valid bit check above
+	 * was done. As such, some of the copied data was stale. The barrier
+	 * ensures the check is before any data is copied.
+	 */
+	mb();
+	return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
+ * @q: The Event Queue whose interrupts are to be disabled.
+ *
+ **/
+static inline void
+lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
+{
+	struct lpfc_register doorbell;
+
+	doorbell.word0 = 0;
+	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_eqe *temp_eqe;
+	struct lpfc_register doorbell;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_eqe = q->qe[q->host_index].eqe;
+		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm) {
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	}
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
+	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
+		readl(q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
+
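+/*
+ * Typical polling pattern (a simplified sketch of the interrupt handlers
+ * later in this file): drain valid EQEs, then release and re-arm.
+ *
+ *	while ((eqe = lpfc_sli4_eq_get(eq)))
+ *		... process the event queue entry ...
+ *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+ */
+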
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+	struct lpfc_cqe *cqe;
+	uint32_t idx;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return NULL;
+
+	/* If the next CQE is not valid then we are done */
+	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	idx = ((q->hba_index + 1) % q->entry_count);
+	if (idx == q->host_index)
+		return NULL;
+
+	cqe = q->qe[q->hba_index].cqe;
+	q->hba_index = idx;
+
+	/*
+	 * insert barrier for instruction interlock : data from the hardware
+	 * must have the valid bit checked before it can be copied and acted
+	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
+	 * instructions allowing action on content before valid bit checked,
+	 * add barrier here as well. May not be needed as "content" is a
+	 * single 32-bit entity here (vs multi word structure for cq's).
+	 */
+	mb();
+	return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_cqe *temp_qe;
+	struct lpfc_register doorbell;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_qe = q->qe[q->host_index].cqe;
+		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm)
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
+			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The Header Receive Queue Entry to put on the header queue.
+ * @drqe: The Data Receive Queue Entry to put on the data queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next
+ * available entries on @hq and @dq. This function will then ring the
+ * Receive Queue Doorbell to signal the HBA to start processing the
+ * Receive Queue Entries. This function returns the index that the rqe was
+ * copied to if successful. If the queue is full it will return -EBUSY,
+ * -EINVAL for mismatched queues, and -ENOMEM if the queue memory has not
+ * been allocated.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+	struct lpfc_rqe *temp_hrqe;
+	struct lpfc_rqe *temp_drqe;
+	struct lpfc_register doorbell;
+	int put_index;
+
+	/* sanity check on queue memory */
+	if (unlikely(!hq) || unlikely(!dq))
+		return -ENOMEM;
+	put_index = hq->host_index;
+	temp_hrqe = hq->qe[put_index].rqe;
+	temp_drqe = dq->qe[dq->host_index].rqe;
+
+	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+		return -EINVAL;
+	if (put_index != dq->host_index)
+		return -EINVAL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+		return -EBUSY;
+	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+	/* Update the host index to point to the next slot */
+	hq->host_index = ((put_index + 1) % hq->entry_count);
+	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+	hq->RQ_buf_posted++;
+
+	/* Ring The Header Receive Queue Doorbell */
+	if (!(hq->host_index % hq->entry_repost)) {
+		doorbell.word0 = 0;
+		if (hq->db_format == LPFC_DB_RING_FORMAT) {
+			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+			       hq->entry_repost);
+			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+			       hq->entry_repost);
+			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+			       hq->host_index);
+			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+		} else {
+			return -EINVAL;
+		}
+		writel(doorbell.word0, hq->db_regaddr);
+	}
+	return put_index;
+}
+
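+/*
+ * Usage sketch (illustrative): receive buffers are always posted as a
+ * header/data pair under hbalock; the doorbell is batched internally every
+ * hq->entry_repost postings.
+ *
+ *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+ *	if (rc < 0)
+ *		... no room, repost the buffer later ...
+ */
+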
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+	/* sanity check on queue memory */
+	if (unlikely(!hq) || unlikely(!dq))
+		return 0;
+
+	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+		return 0;
+	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+	return 1;
+}
+
+/**
+ * lpfc_cmd_iocb - Get next command iocb entry in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next command iocb entry
+ * in the command ring. The caller must hold hbalock to prevent
+ * other threads from consuming the next command iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
+static inline IOCB_t *
+lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
+			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
+}
+
+/**
+ * lpfc_resp_iocb - Get next response iocb entry in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next response iocb entry
+ * in the response ring. The caller must hold hbalock to make sure
+ * that no other thread consumes the next response iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
+static inline IOCB_t *
+lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
+			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
+}
+
+/**
+ * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
+struct lpfc_iocbq *
+__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+	struct lpfc_iocbq * iocbq = NULL;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+	if (iocbq)
+		phba->iocb_cnt++;
+	if (phba->iocb_cnt > phba->iocb_max)
+		phba->iocb_max = phba->iocb_cnt;
+	return iocbq;
+}
+
+/**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	struct lpfc_sglq *sglq;
+
+	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
+	return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	struct lpfc_sglq *sglq;
+
+	sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
+	return sglq;
+}
+
+/**
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ **/
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+		    uint16_t xritag,
+		    struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+
+	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+
+	/* The target DID could have been swapped (cable swap)
+	 * we should use the ndlp from the findnode if it is
+	 * available.
+	 */
+	if ((!ndlp) && rrq->ndlp)
+		ndlp = rrq->ndlp;
+
+	if (!ndlp)
+		goto out;
+
+	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
+		rrq->send_rrq = 0;
+		rrq->xritag = 0;
+		rrq->rrq_stop_time = 0;
+	}
+out:
+	mempool_free(rrq, phba->rrq_pool);
+}
+
+/**
+ * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. It checks whether
+ * stop_time (ratov from setting the rrq active) has been reached; if it
+ * has and the send_rrq flag is set then it calls lpfc_send_rrq. If the
+ * send_rrq flag is not set then it just calls the routine to clear the
+ * rrq and free the rrq resource.
+ * The timer is set to the next rrq that is going to expire before
+ * leaving the routine.
+ *
+ **/
+void
+lpfc_handle_rrq_active(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+	LIST_HEAD(send_rrq);
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+	list_for_each_entry_safe(rrq, nextrrq,
+				 &phba->active_rrq_list, list) {
+		if (time_after(jiffies, rrq->rrq_stop_time))
+			list_move(&rrq->list, &send_rrq);
+		else if (time_before(rrq->rrq_stop_time, next_time))
+			next_time = rrq->rrq_stop_time;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if ((!list_empty(&phba->active_rrq_list)) &&
+	    (!(phba->pport->load_flag & FC_UNLOADING)))
+		mod_timer(&phba->rrq_tmr, next_time);
+	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+		list_del(&rrq->list);
+		if (!rrq->send_rrq) {
+			/* this call will free the rrq */
+			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		} else if (lpfc_send_rrq(phba, rrq)) {
+			/* if we send the rrq then the completion handler
+			 * will clear the bit in the xribitmap.
+			 */
+			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		}
+	}
+}
+
+/**
+ * lpfc_get_active_rrq - Get the active RRQ for this exchange.
+ * @vport: Pointer to vport context object.
+ * @xri: The xri used in the exchange.
+ * @did: The target's DID for this exchange.
+ *
+ * returns NULL = rrq not found in the phba->active_rrq_list.
+ *         rrq = rrq for this xri and target.
+ **/
+struct lpfc_node_rrq *
+lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return NULL;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		if (rrq->vport == vport && rrq->xritag == xri &&
+				rrq->nlp_DID == did){
+			list_del(&rrq->list);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			return rrq;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return NULL;
+}
+
+/**
+ * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
+ * @vport: Pointer to vport context object.
+ * @ndlp: Pointer to the lpfc_nodelist structure.
+ *
+ * If @ndlp is NULL, remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If @ndlp is not NULL then only remove rrqs for this vport & this ndlp.
+ **/
+void
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+	LIST_HEAD(rrq_list);
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	if (!ndlp) {
+		lpfc_sli4_vport_delete_els_xri_aborted(vport);
+		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
+	}
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+		if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
+			list_move(&rrq->list, &rrq_list);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+		list_del(&rrq->list);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
+}
+
+/**
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Targets nodelist pointer for this exchange.
+ * @xritag: The xri in the bitmap to test.
+ *
+ * This function is called with hbalock held. This function
+ * returns 0 = rrq not active for this xri
+ *         1 = rrq is valid for this xri.
+ **/
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t  xritag)
+{
+	lockdep_assert_held(&phba->hbalock);
+	if (!ndlp)
+		return 0;
+	if (!ndlp->active_rrqs_xri_bitmap)
+		return 0;
+	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function takes the hbalock.
+ * The active bit is always set in the active rrq xri_bitmap even
+ * if there is no slot available for the other rrq information.
+ *
+ * returns 0 if the rrq is activated for this xri
+ *         < 0 No memory or invalid ndlp.
+ **/
+int
+lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+	unsigned long iflags;
+	struct lpfc_node_rrq *rrq;
+	int empty;
+
+	if (!ndlp)
+		return -EINVAL;
+
+	if (!phba->cfg_enable_rrq)
+		return -EINVAL;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+		goto out;
+	}
+
+	/*
+	 * set the active bit even if there is no mem available.
+	 */
+	if (NLP_CHK_FREE_REQ(ndlp))
+		goto out;
+
+	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+		goto out;
+
+	if (!ndlp->active_rrqs_xri_bitmap)
+		goto out;
+
+	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
+		goto out;
+
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+	if (!rrq) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
+				" DID:0x%x Send:%d\n",
+				xritag, rxid, ndlp->nlp_DID, send_rrq);
+		return -EINVAL;
+	}
+	if (phba->cfg_enable_rrq == 1)
+		rrq->send_rrq = send_rrq;
+	else
+		rrq->send_rrq = 0;
+	rrq->xritag = xritag;
+	rrq->rrq_stop_time = jiffies +
+				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+	rrq->ndlp = ndlp;
+	rrq->nlp_DID = ndlp->nlp_DID;
+	rrq->vport = ndlp->vport;
+	rrq->rxid = rxid;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	empty = list_empty(&phba->active_rrq_list);
+	list_add_tail(&rrq->list, &phba->active_rrq_list);
+	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	if (empty)
+		lpfc_worker_wake_up(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return 0;
+out:
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
+			" DID:0x%x Send:%d\n",
+			xritag, rxid, ndlp->nlp_DID, send_rrq);
+	return -EINVAL;
+}
+
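+/*
+ * RRQ lifecycle (summary of the routines above): lpfc_set_rrq_active()
+ * marks an xri busy in the node's bitmap and queues an lpfc_node_rrq on
+ * phba->active_rrq_list; when the rrq timer fires, lpfc_handle_rrq_active()
+ * either sends the RRQ ELS or calls lpfc_clr_rrq_active() to clear the
+ * bitmap bit and free the entry.
+ */
+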
+/**
+ * __lpfc_sli_get_els_sglq - Allocates an sglq object from the els sgl pool
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to the iocbq.
+ *
+ * This function is called with the ring lock held. It gets a new driver
+ * sglq object from the els sglq list. If the list is not empty, it returns
+ * a pointer to the newly allocated sglq object; otherwise it returns NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+{
+	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
+	struct lpfc_sglq *sglq = NULL;
+	struct lpfc_sglq *start_sglq = NULL;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_nodelist *ndlp;
+	int found = 0;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
+		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+		ndlp = lpfc_cmd->rdata->pnode;
+	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
+			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+		ndlp = piocbq->context_un.ndlp;
+	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
+		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+			ndlp = NULL;
+		else
+			ndlp = piocbq->context_un.ndlp;
+	} else {
+		ndlp = piocbq->context1;
+	}
+
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
+	start_sglq = sglq;
+	while (!found) {
+		if (!sglq)
+			break;
+		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
+		    test_bit(sglq->sli4_lxritag,
+		    ndlp->active_rrqs_xri_bitmap)) {
+			/* This xri has an rrq outstanding for this DID.
+			 * put it back in the list and get another xri.
+			 */
+			list_add_tail(&sglq->list, lpfc_els_sgl_list);
+			sglq = NULL;
+			list_remove_head(lpfc_els_sgl_list, sglq,
+						struct lpfc_sglq, list);
+			if (sglq == start_sglq) {
+				list_add_tail(&sglq->list, lpfc_els_sgl_list);
+				sglq = NULL;
+				break;
+			} else
+				continue;
+		}
+		sglq->ndlp = ndlp;
+		found = 1;
+		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
+		sglq->state = SGL_ALLOCATED;
+	}
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	return sglq;
+}
+
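+/*
+ * Note on the loop above: an sglq whose XRI still has an RRQ outstanding
+ * for the destination DID (see lpfc_set_rrq_active()) is skipped and put
+ * back on the free list; the list is cycled through at most once before
+ * giving up.
+ */
+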
+/**
+ * __lpfc_sli_get_nvmet_sglq - Allocates an sglq object from the nvmet sgl pool
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to the iocbq.
+ *
+ * This function is called with the sgl_list lock held. It gets a new driver
+ * sglq object from the nvmet sglq list. If the list is not empty, it returns
+ * a pointer to the newly allocated sglq object; otherwise it returns NULL.
+ **/
+struct lpfc_sglq *
+__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+{
+	struct list_head *lpfc_nvmet_sgl_list;
+	struct lpfc_sglq *sglq = NULL;
+
+	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
+
+	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
+
+	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
+	if (!sglq)
+		return NULL;
+	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
+	sglq->state = SGL_ALLOCATED;
+	return sglq;
+}
+
+/**
+ * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
+struct lpfc_iocbq *
+lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq * iocbq = NULL;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	iocbq = __lpfc_sli_get_iocbq(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return iocbq;
+}
+
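+/*
+ * Pairing sketch (illustrative): lpfc_sli_get_iocbq() and
+ * lpfc_sli_release_iocbq() take hbalock themselves, while the __ variants
+ * assume the caller already holds it.
+ *
+ *	piocb = lpfc_sli_get_iocbq(phba);
+ *	if (!piocb)
+ *		... handle allocation failure ...
+ *	... build and issue the iocb ...
+ *	lpfc_sli_release_iocbq(phba, piocb);
+ */
+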
+/**
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_els_sgl_list).
+ **/
+static void
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_sglq *sglq;
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+	unsigned long iflag = 0;
+	struct lpfc_sli_ring *pring;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	if (iocbq->sli4_xritag == NO_XRI)
+		sglq = NULL;
+	else
+		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
+
+	if (sglq)  {
+		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+					  iflag);
+			sglq->state = SGL_FREED;
+			sglq->ndlp = NULL;
+			list_add_tail(&sglq->list,
+				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.sgl_list_lock, iflag);
+			goto out;
+		}
+
+		pring = phba->sli4_hba.els_wq->pring;
+		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+			(sglq->state != SGL_XRI_ABORTED)) {
+			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+					  iflag);
+			list_add(&sglq->list,
+				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.sgl_list_lock, iflag);
+		} else {
+			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+					  iflag);
+			sglq->state = SGL_FREED;
+			sglq->ndlp = NULL;
+			list_add_tail(&sglq->list,
+				      &phba->sli4_hba.lpfc_els_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.sgl_list_lock, iflag);
+
+			/* Check if TXQ queue needs to be serviced */
+			if (!list_empty(&pring->txq))
+				lpfc_worker_wake_up(phba);
+		}
+	}
+
+out:
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_lxritag = NO_XRI;
+	iocbq->sli4_xritag = NO_XRI;
+	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
+			      LPFC_IO_NVME_LS);
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+
+/**
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+
+	lockdep_assert_held(&phba->hbalock);
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+/**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	lockdep_assert_held(&phba->hbalock);
+
+	phba->__lpfc_sli_release_iocbq(phba, iocbq);
+	phba->iocb_cnt--;
+}
+
+/**
+ * lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with no lock held to release the iocb to
+ * iocb pool.
+ **/
+void
+lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	unsigned long iflags;
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_sli_release_iocbq(phba, iocbq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
+ * @phba: Pointer to HBA context object.
+ * @iocblist: List of IOCBs.
+ * @ulpstatus: ULP status in IOCB command field.
+ * @ulpWord4: ULP word-4 in IOCB command field.
+ *
+ * This function is called with a list of IOCBs to cancel. It cancels each
+ * IOCB on the list by invoking the complete callback function associated
+ * with the IOCB, with the provided @ulpstatus and @ulpWord4 set in the
+ * IOCB command fields.
+ **/
+void
+lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
+		      uint32_t ulpstatus, uint32_t ulpWord4)
+{
+	struct lpfc_iocbq *piocb;
+
+	while (!list_empty(iocblist)) {
+		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
+		if (!piocb->iocb_cmpl)
+			lpfc_sli_release_iocbq(phba, piocb);
+		else {
+			piocb->iocb.ulpStatus = ulpstatus;
+			piocb->iocb.un.ulpWord[4] = ulpWord4;
+			(piocb->iocb_cmpl) (phba, piocb, piocb);
+		}
+	}
+	return;
+}
+
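+/*
+ * Usage sketch (illustrative status codes): a ring's txq is typically
+ * drained onto a local list and then cancelled in one pass.
+ *
+ *	LIST_HEAD(completions);
+ *
+ *	list_splice_init(&pring->txq, &completions);
+ *	lpfc_sli_cancel_iocbs(phba, &completions,
+ *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
+ */
+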
+/**
+ * lpfc_sli_iocb_cmd_type - Get the iocb type
+ * @iocb_cmnd: iocb command code.
+ *
+ * This function is called by ring event handler function to get the iocb type.
+ * This function translates the iocb command to an iocb command type used to
+ * decide the final disposition of each completed IOCB.
+ * The function returns
+ * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
+ * LPFC_SOL_IOCB     if it is a solicited iocb completion
+ * LPFC_ABORT_IOCB   if it is an abort iocb
+ * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
+ *
+ * The caller is not required to hold any lock.
+ **/
+static lpfc_iocb_type
+lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
+{
+	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
+
+	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
+		return LPFC_UNKNOWN_IOCB;
+
+	switch (iocb_cmnd) {
+	case CMD_XMIT_SEQUENCE_CR:
+	case CMD_XMIT_SEQUENCE_CX:
+	case CMD_XMIT_BCAST_CN:
+	case CMD_XMIT_BCAST_CX:
+	case CMD_ELS_REQUEST_CR:
+	case CMD_ELS_REQUEST_CX:
+	case CMD_CREATE_XRI_CR:
+	case CMD_CREATE_XRI_CX:
+	case CMD_GET_RPI_CN:
+	case CMD_XMIT_ELS_RSP_CX:
+	case CMD_GET_RPI_CR:
+	case CMD_FCP_IWRITE_CR:
+	case CMD_FCP_IWRITE_CX:
+	case CMD_FCP_IREAD_CR:
+	case CMD_FCP_IREAD_CX:
+	case CMD_FCP_ICMND_CR:
+	case CMD_FCP_ICMND_CX:
+	case CMD_FCP_TSEND_CX:
+	case CMD_FCP_TRSP_CX:
+	case CMD_FCP_TRECEIVE_CX:
+	case CMD_FCP_AUTO_TRSP_CX:
+	case CMD_ADAPTER_MSG:
+	case CMD_ADAPTER_DUMP:
+	case CMD_XMIT_SEQUENCE64_CR:
+	case CMD_XMIT_SEQUENCE64_CX:
+	case CMD_XMIT_BCAST64_CN:
+	case CMD_XMIT_BCAST64_CX:
+	case CMD_ELS_REQUEST64_CR:
+	case CMD_ELS_REQUEST64_CX:
+	case CMD_FCP_IWRITE64_CR:
+	case CMD_FCP_IWRITE64_CX:
+	case CMD_FCP_IREAD64_CR:
+	case CMD_FCP_IREAD64_CX:
+	case CMD_FCP_ICMND64_CR:
+	case CMD_FCP_ICMND64_CX:
+	case CMD_FCP_TSEND64_CX:
+	case CMD_FCP_TRSP64_CX:
+	case CMD_FCP_TRECEIVE64_CX:
+	case CMD_GEN_REQUEST64_CR:
+	case CMD_GEN_REQUEST64_CX:
+	case CMD_XMIT_ELS_RSP64_CX:
+	case DSSCMD_IWRITE64_CR:
+	case DSSCMD_IWRITE64_CX:
+	case DSSCMD_IREAD64_CR:
+	case DSSCMD_IREAD64_CX:
+		type = LPFC_SOL_IOCB;
+		break;
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+	case CMD_CLOSE_XRI_CN:
+	case CMD_CLOSE_XRI_CX:
+	case CMD_XRI_ABORTED_CX:
+	case CMD_ABORT_MXRI64_CN:
+	case CMD_XMIT_BLS_RSP64_CX:
+		type = LPFC_ABORT_IOCB;
+		break;
+	case CMD_RCV_SEQUENCE_CX:
+	case CMD_RCV_ELS_REQ_CX:
+	case CMD_RCV_SEQUENCE64_CX:
+	case CMD_RCV_ELS_REQ64_CX:
+	case CMD_ASYNC_STATUS:
+	case CMD_IOCB_RCV_SEQ64_CX:
+	case CMD_IOCB_RCV_ELS64_CX:
+	case CMD_IOCB_RCV_CONT64_CX:
+	case CMD_IOCB_RET_XRI64_CX:
+		type = LPFC_UNSOL_IOCB;
+		break;
+	case CMD_IOCB_XMIT_MSEQ64_CR:
+	case CMD_IOCB_XMIT_MSEQ64_CX:
+	case CMD_IOCB_RCV_SEQ_LIST64_CX:
+	case CMD_IOCB_RCV_ELS_LIST64_CX:
+	case CMD_IOCB_CLOSE_EXTENDED_CN:
+	case CMD_IOCB_ABORT_EXTENDED_CN:
+	case CMD_IOCB_RET_HBQE64_CN:
+	case CMD_IOCB_FCP_IBIDIR64_CR:
+	case CMD_IOCB_FCP_IBIDIR64_CX:
+	case CMD_IOCB_FCP_ITASKMGT64_CX:
+	case CMD_IOCB_LOGENTRY_CN:
+	case CMD_IOCB_LOGENTRY_ASYNC_CN:
+		printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
+		       __func__, iocb_cmnd);
+		type = LPFC_UNKNOWN_IOCB;
+		break;
+	default:
+		type = LPFC_UNKNOWN_IOCB;
+		break;
+	}
+
+	return type;
+}
+
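+/*
+ * Example (illustrative): ring event handling dispatches on the translated
+ * completion type rather than on the raw command code.
+ *
+ *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
+ *	case LPFC_SOL_IOCB:
+ *		... match against the txcmplq and call iocb_cmpl ...
+ *	default:
+ *		break;
+ *	}
+ */
+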
+/**
+ * lpfc_sli_ring_map - Issue config_ring mbox for all rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from SLI initialization code
+ * to configure every ring of the HBA's SLI interface. The
+ * caller is not required to hold any lock. This function issues
+ * a config_ring mailbox command for each ring.
+ * This function returns zero if successful else returns a negative
+ * error code.
+ **/
+static int
+lpfc_sli_ring_map(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *pmbox;
+	int i, rc, ret = 0;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb)
+		return -ENOMEM;
+	pmbox = &pmb->u.mb;
+	phba->link_state = LPFC_INIT_MBX_CMDS;
+	for (i = 0; i < psli->num_rings; i++) {
+		lpfc_config_ring(phba, i, pmb);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0446 Adapter failed to init (%d), "
+					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
+					"ring %d\n",
+					rc, pmbox->mbxCommand,
+					pmbox->mbxStatus, i);
+			phba->link_state = LPFC_HBA_ERROR;
+			ret = -ENXIO;
+			break;
+		}
+	}
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return ret;
+}
+
+/**
+ * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to the driver iocb object.
+ *
+ * This function is called with hbalock held. The function adds the
+ * new iocb to txcmplq of the given ring. This function always returns
+ * 0. If this function is called for ELS ring, this function checks if
+ * there is a vport associated with the ELS command. This function also
+ * starts els_tmofunc timer if this is an ELS command.
+ **/
+static int
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *piocb)
+{
+	lockdep_assert_held(&phba->hbalock);
+
+	BUG_ON(!piocb);
+
+	list_add_tail(&piocb->list, &pring->txcmplq);
+	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+
+	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
+	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+		BUG_ON(!piocb->vport);
+		if (!(piocb->vport->load_flag & FC_UNLOADING))
+			mod_timer(&piocb->vport->els_tmofunc,
+				  jiffies +
+				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_ringtx_get - Get first element of the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to get next
+ * iocb in txq of the given ring. If there is any iocb in
+ * the txq, the function returns first iocb in the list after
+ * removing the iocb from the list, else it returns NULL.
+ **/
+struct lpfc_iocbq *
+lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	struct lpfc_iocbq *cmd_iocb;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
+	return cmd_iocb;
+}
+
+/**
+ * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held and the caller must post the
+ * iocb without releasing the lock. If the caller releases the lock,
+ * iocb slot returned by the function is not guaranteed to be available.
+ * The function returns pointer to the next available iocb slot if there
+ * is available slot in the ring, else it returns NULL.
+ * If the get index of the ring is ahead of the put index, the function
+ * will post an error attention event to the worker thread to take the
+ * HBA to offline state.
+ **/
+static IOCB_t *
+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
+	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
+		pring->sli.sli3.next_cmdidx = 0;
+
+	if (unlikely(pring->sli.sli3.local_getidx ==
+		pring->sli.sli3.next_cmdidx)) {
+
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
+
+		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0315 Ring %d issue: portCmdGet %d "
+					"is bigger than cmd ring %d\n",
+					pring->ringno,
+					pring->sli.sli3.local_getidx,
+					max_cmd_idx);
+
+			phba->link_state = LPFC_HBA_ERROR;
+			/*
+			 * All error attention handlers are posted to
+			 * worker thread
+			 */
+			phba->work_ha |= HA_ERATT;
+			phba->work_hs = HS_FFER3;
+
+			lpfc_worker_wake_up(phba);
+
+			return NULL;
+		}
+
+		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
+			return NULL;
+	}
+
+	return lpfc_cmd_iocb(phba, pring);
+}
+
+/**
+ * lpfc_sli_next_iotag - Get an iotag for the iocb
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function gets an iotag for the iocb. If there is no unused iotag and
+ * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
+ * array and assigns a new iotag.
+ * The function returns the allocated iotag if successful, else returns zero.
+ * Zero is not a valid iotag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_iocbq **new_arr;
+	struct lpfc_iocbq **old_arr;
+	size_t new_len;
+	struct lpfc_sli *psli = &phba->sli;
+	uint16_t iotag;
+
+	spin_lock_irq(&phba->hbalock);
+	iotag = psli->last_iotag;
+	if (++iotag < psli->iocbq_lookup_len) {
+		psli->last_iotag = iotag;
+		psli->iocbq_lookup[iotag] = iocbq;
+		spin_unlock_irq(&phba->hbalock);
+		iocbq->iotag = iotag;
+		return iotag;
+	} else if (psli->iocbq_lookup_len < (0xffff
+					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
+		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
+		spin_unlock_irq(&phba->hbalock);
+		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
+				  GFP_KERNEL);
+		if (new_arr) {
+			spin_lock_irq(&phba->hbalock);
+			old_arr = psli->iocbq_lookup;
+			if (new_len <= psli->iocbq_lookup_len) {
+				/* highly improbable case */
+				kfree(new_arr);
+				iotag = psli->last_iotag;
+				if (++iotag < psli->iocbq_lookup_len) {
+					psli->last_iotag = iotag;
+					psli->iocbq_lookup[iotag] = iocbq;
+					spin_unlock_irq(&phba->hbalock);
+					iocbq->iotag = iotag;
+					return iotag;
+				}
+				spin_unlock_irq(&phba->hbalock);
+				return 0;
+			}
+			if (psli->iocbq_lookup)
+				memcpy(new_arr, old_arr,
+				       ((psli->last_iotag  + 1) *
+					sizeof (struct lpfc_iocbq *)));
+			psli->iocbq_lookup = new_arr;
+			psli->iocbq_lookup_len = new_len;
+			psli->last_iotag = iotag;
+			psli->iocbq_lookup[iotag] = iocbq;
+			spin_unlock_irq(&phba->hbalock);
+			iocbq->iotag = iotag;
+			kfree(old_arr);
+			return iotag;
+		}
+	} else
+		spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
+			psli->last_iotag);
+
+	return 0;
+}
+
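+/*
+ * Note: iotag 0 is reserved as "invalid", which is why callers of
+ * lpfc_sli_next_iotag() treat a zero return as allocation failure; the
+ * lookup array grows by LPFC_IOCBQ_LOOKUP_INCREMENT entries at a time up
+ * to the 16-bit iotag limit.
+ */
+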
+/**
+ * lpfc_sli_submit_iocb - Submit an iocb to the firmware
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocb: Pointer to iocb slot in the ring.
+ * @nextiocb: Pointer to driver iocb object which need to be
+ *            posted to firmware.
+ *
+ * This function is called with hbalock held to post a new iocb to
+ * the firmware. This function copies the new iocb to ring iocb slot and
+ * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * a completion call back for this iocb else the function will free the
+ * iocb object.
+ **/
+static void
+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
+{
+	lockdep_assert_held(&phba->hbalock);
+	/*
+	 * Set up an iotag
+	 */
+	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+
+
+	if (pring->ringno == LPFC_ELS_RING) {
+		lpfc_debugfs_slow_ring_trc(phba,
+			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
+			*(((uint32_t *) &nextiocb->iocb) + 4),
+			*(((uint32_t *) &nextiocb->iocb) + 6),
+			*(((uint32_t *) &nextiocb->iocb) + 7));
+	}
+
+	/*
+	 * Issue iocb command to adapter
+	 */
+	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
+	wmb();
+	pring->stats.iocb_cmd++;
+
+	/*
+	 * If there is no completion routine to call, we can release the
+	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
+	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+	 */
+	if (nextiocb->iocb_cmpl)
+		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
+	else
+		__lpfc_sli_release_iocbq(phba, nextiocb);
+
+	/*
+	 * Let the HBA know what IOCB slot will be the next one the
+	 * driver will put a command into.
+	 */
+	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
+	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+}
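+
+/*
+ * Completion-handler sketch (illustrative): when iocb_cmpl is non-NULL
+ * the iocb is parked on the txcmplq and the handler is later invoked
+ * with both the command and the response iocbs, e.g.:
+ *
+ *	static void my_cmpl(struct lpfc_hba *phba,
+ *			    struct lpfc_iocbq *cmdiocb,
+ *			    struct lpfc_iocbq *rspiocb)
+ *	{
+ *		... inspect rspiocb->iocb.ulpStatus, release cmdiocb ...
+ *	}
+ *
+ * "my_cmpl" is a hypothetical name; leaving iocb_cmpl NULL makes this
+ * submit path release the iocb immediately, as noted above.
+ */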
+
+/**
+ * lpfc_sli_update_full_ring - Update the chip attention register
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * The caller is not required to hold any lock for calling this function.
+ * This function updates the chip attention bits for the ring to inform firmware
+ * that there is pending work to be done for this ring and requests an
+ * interrupt when there is space available in the ring. This function is
+ * called when the driver is unable to post more iocbs to the ring due
+ * to unavailability of space in the ring.
+ **/
+static void
+lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	int ringno = pring->ringno;
+
+	pring->flag |= LPFC_CALL_RING_AVAILABLE;
+
+	wmb();
+
+	/*
+	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
+	 * The HBA will tell us when an IOCB entry is available.
+	 */
+	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
+	readl(phba->CAregaddr); /* flush */
+
+	pring->stats.iocb_cmd_full++;
+}
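+
+/*
+ * Worked example (for clarity only): each ring owns a 4-bit field in
+ * the Chip Attention register, so a ring's attention bits are shifted
+ * left by ringno * 4. For the ELS ring (ringno 2 on SLI3):
+ *
+ *	(CA_R0ATT | CA_R0CE_REQ) << (2 * 4)
+ *
+ * places the attention/ring-empty-request pair in bits 8-11, i.e. the
+ * ring-2 field of the register.
+ */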
+
+/**
+ * lpfc_sli_update_ring - Update chip attention register
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function updates the chip attention register bit for the
+ * given ring to inform HBA that there is more work to be done
+ * in this ring. The caller is not required to hold any lock.
+ **/
+static void
+lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	int ringno = pring->ringno;
+
+	/*
+	 * Tell the HBA that there is work to do in this ring.
+	 */
+	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
+		wmb();
+		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+	}
+}
+
+/**
+ * lpfc_sli_resume_iocb - Process iocbs in the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to post pending iocbs
+ * in the txq to the firmware. This function is called when driver
+ * detects space available in the ring.
+ **/
+static void
+lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	IOCB_t *iocb;
+	struct lpfc_iocbq *nextiocb;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	/*
+	 * Check to see if:
+	 *  (a) there is anything on the txq to send
+	 *  (b) link is up
+	 *  (c) link attention events can be processed (fcp ring only)
+	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
+	 */
+
+	if (lpfc_is_link_up(phba) &&
+	    (!list_empty(&pring->txq)) &&
+	    (pring->ringno != LPFC_FCP_RING ||
+	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
+
+		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
+			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+		if (iocb)
+			lpfc_sli_update_ring(phba, pring);
+		else
+			lpfc_sli_update_full_ring(phba, pring);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function is called with hbalock held to get the next
+ * available slot for the given HBQ. If there is free slot
+ * available for the HBQ it will return pointer to the next available
+ * HBQ entry else it will return NULL.
+ **/
+static struct lpfc_hbq_entry *
+lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
+{
+	struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+	lockdep_assert_held(&phba->hbalock);
+
+	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
+	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
+		hbqp->next_hbqPutIdx = 0;
+
+	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
+		uint32_t raw_index = phba->hbq_get[hbqno];
+		uint32_t getidx = le32_to_cpu(raw_index);
+
+		hbqp->local_hbqGetIdx = getidx;
+
+		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_SLI | LOG_VPORT,
+					"1802 HBQ %d: local_hbqGetIdx "
+					"%u is > than hbqp->entry_count %u\n",
+					hbqno, hbqp->local_hbqGetIdx,
+					hbqp->entry_count);
+
+			phba->link_state = LPFC_HBA_ERROR;
+			return NULL;
+		}
+
+		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
+			return NULL;
+	}
+
+	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
+			hbqp->hbqPutIdx;
+}
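+
+/*
+ * Worked example (illustrative): with entry_count = 4 and
+ * hbqPutIdx = next_hbqPutIdx = 3, the pre-increment above yields
+ * next_hbqPutIdx = 4, which wraps to 0. If local_hbqGetIdx is also 0,
+ * the get index is re-read from the port; only if it still matches the
+ * wrapped put index is the HBQ considered full and NULL returned,
+ * otherwise slot 3 (hbqPutIdx) is handed out.
+ */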
+
+/**
+ * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held to free all the
+ * hbq buffers while uninitializing the SLI interface. It also
+ * frees the HBQ buffers returned by the firmware but not yet
+ * processed by the upper layers.
+ **/
+void
+lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+	struct hbq_dmabuf *hbq_buf;
+	unsigned long flags;
+	int i, hbq_count;
+
+	hbq_count = lpfc_sli_hbq_count();
+	/* Return all memory used by all HBQs */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	for (i = 0; i < hbq_count; ++i) {
+		list_for_each_entry_safe(dmabuf, next_dmabuf,
+				&phba->hbqs[i].hbq_buffer_list, list) {
+			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+			list_del(&hbq_buf->dbuf.list);
+			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
+		}
+		phba->hbqs[i].buffer_count = 0;
+	}
+
+	/* Mark the HBQs not in use */
+	phba->hbq_in_use = 0;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an
+ * hbq buffer to the firmware via the SLI revision specific
+ * handler. The function returns zero if the buffer is
+ * successfully posted, otherwise it returns an error code.
+ **/
+static int
+lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
+			 struct hbq_dmabuf *hbq_buf)
+{
+	lockdep_assert_held(&phba->hbalock);
+	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post a hbq buffer to the
+ * firmware. If the function finds an empty slot in the HBQ, it will post the
+ * buffer and place it on the hbq_buffer_list. The function returns zero if
+ * it successfully posts the buffer, else it returns an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
+{
+	struct lpfc_hbq_entry *hbqe;
+	dma_addr_t physaddr = hbq_buf->dbuf.phys;
+
+	lockdep_assert_held(&phba->hbalock);
+	/* Get next HBQ entry slot to use */
+	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
+	if (hbqe) {
+		struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
+		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
+		hbqe->bde.tus.f.bdeFlags = 0;
+		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
+		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
+				/* Sync SLIM */
+		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
+		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
+				/* flush */
+		readl(phba->hbq_put + hbqno);
+		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
+{
+	int rc;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct lpfc_queue *hrq;
+	struct lpfc_queue *drq;
+
+	if (hbqno != LPFC_ELS_HBQ)
+		return 1;
+	hrq = phba->sli4_hba.hdr_rq;
+	drq = phba->sli4_hba.dat_rq;
+
+	lockdep_assert_held(&phba->hbalock);
+	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+	if (rc < 0)
+		return rc;
+	hbq_buf->tag = (rc | (hbqno << 16));
+	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+	return 0;
+}
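+
+/*
+ * Tag layout example (illustrative): the buffer tag packs the HBQ
+ * number into the upper 16 bits and the buffer/RQE index into the
+ * lower 16:
+ *
+ *	tag   = index | (hbqno << 16);	encode, as above
+ *	hbqno = tag >> 16;		decode, see lpfc_sli_hbqbuf_find()
+ *
+ * e.g. RQE index 5 on LPFC_ELS_HBQ (0) gives tag 0x00000005, while the
+ * same index on a hypothetical HBQ 1 would give 0x00010005.
+ */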
+
+/* HBQ for ELS and CT traffic. */
+static struct lpfc_hbq_init lpfc_els_hbq = {
+	.rn = 1,
+	.entry_count = 256,
+	.mask_count = 0,
+	.profile = 0,
+	.ring_mask = (1 << LPFC_ELS_RING),
+	.buffer_count = 0,
+	.init_count = 40,
+	.add_count = 40,
+};
+
+/* Array of HBQs */
+struct lpfc_hbq_init *lpfc_hbq_defs[] = {
+	&lpfc_els_hbq,
+};
+
+/**
+ * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @count: Number of HBQ buffers to be posted.
+ *
+ * This function is called with no lock held to post more hbq buffers to the
+ * given HBQ. The function returns the number of HBQ buffers successfully
+ * posted.
+ **/
+static int
+lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
+{
+	uint32_t i, posted = 0;
+	unsigned long flags;
+	struct hbq_dmabuf *hbq_buffer;
+	LIST_HEAD(hbq_buf_list);
+
+	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
+		return 0;
+
+	if ((phba->hbqs[hbqno].buffer_count + count) >
+	    lpfc_hbq_defs[hbqno]->entry_count)
+		count = lpfc_hbq_defs[hbqno]->entry_count -
+					phba->hbqs[hbqno].buffer_count;
+	if (!count)
+		return 0;
+	/* Allocate HBQ entries */
+	for (i = 0; i < count; i++) {
+		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
+		if (!hbq_buffer)
+			break;
+		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
+	}
+	/* Check whether HBQ is still in use */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (!phba->hbq_in_use)
+		goto err;
+	while (!list_empty(&hbq_buf_list)) {
+		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+				 dbuf.list);
+		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
+				      (hbqno << 16));
+		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+			phba->hbqs[hbqno].buffer_count++;
+			posted++;
+		} else
+			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return posted;
+err:
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	while (!list_empty(&hbq_buf_list)) {
+		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+				 dbuf.list);
+		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ number.
+ *
+ * This function posts more buffers to the HBQ. This function
+ * is called with no lock held. The function returns the number of HBQ entries
+ * successfully allocated.
+ **/
+int
+lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return 0;
+	else
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					 lpfc_hbq_defs[qno]->add_count);
+}
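+
+/*
+ * Usage sketch (illustrative): an unsolicited-receive path that has
+ * consumed a buffer and wants the pool topped back up would call:
+ *
+ *	lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ *
+ * On SLI4 this is deliberately a no-op (returns 0) because receive
+ * buffers are replenished through the RQ path instead.
+ */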
+
+/**
+ * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
+ * @phba: Pointer to HBA context object.
+ * @qno:  HBQ queue number.
+ *
+ * This function is called from SLI initialization code path with
+ * no lock held to post initial HBQ buffers to firmware. The
+ * function returns the number of HBQ entries successfully allocated.
+ **/
+static int
+lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					lpfc_hbq_defs[qno]->entry_count);
+	else
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					 lpfc_hbq_defs[qno]->init_count);
+}
+
+/**
+ * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
+ * @rb_list: Pointer to the hbq buffer list to take the buffer from.
+ *
+ * This function removes the first hbq buffer on an hbq list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_get(struct list_head *rb_list)
+{
+	struct lpfc_dmabuf *d_buf;
+
+	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
+	if (!d_buf)
+		return NULL;
+	return container_of(d_buf, struct hbq_dmabuf, dbuf);
+}
+
+/**
+ * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
+ * @phba: Pointer to HBA context object.
+ * @hrq: Pointer to the header receive queue.
+ *
+ * This function removes the first RQ buffer on an RQ buffer list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct rqb_dmabuf *
+lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
+{
+	struct lpfc_dmabuf *h_buf;
+	struct lpfc_rqb *rqbp;
+
+	rqbp = hrq->rqbp;
+	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
+			 struct lpfc_dmabuf, list);
+	if (!h_buf)
+		return NULL;
+	rqbp->buffer_count--;
+	return container_of(h_buf, struct rqb_dmabuf, hbuf);
+}
+
+/**
+ * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
+ * @phba: Pointer to HBA context object.
+ * @tag: Tag of the hbq buffer.
+ *
+ * This function searches for the hbq buffer associated with the given tag in
+ * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
+ * otherwise it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
+{
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *hbq_buf;
+	uint32_t hbqno;
+
+	hbqno = tag >> 16;
+	if (hbqno >= LPFC_MAX_HBQS)
+		return NULL;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
+		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		if (hbq_buf->tag == tag) {
+			spin_unlock_irq(&phba->hbalock);
+			return hbq_buf;
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
+			"1803 Bad hbq tag. Data: x%x x%x\n",
+			tag, phba->hbqs[tag >> 16].buffer_count);
+	return NULL;
+}
+
+/**
+ * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
+ * @phba: Pointer to HBA context object.
+ * @hbq_buffer: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held. This function gives back
+ * the hbq buffer to firmware. If the HBQ does not have space to
+ * post the buffer, it will free the buffer.
+ **/
+void
+lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
+{
+	uint32_t hbqno;
+
+	if (hbq_buffer) {
+		hbqno = hbq_buffer->tag >> 16;
+		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
+			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+	}
+}
+
+/**
+ * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
+ * @mbxCommand: mailbox command code.
+ *
+ * This function is called by the mailbox event handler function to verify
+ * that the completed mailbox command is a legitimate mailbox command. If the
+ * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
+ * and the mailbox event handler will take the HBA offline.
+ **/
+static int
+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
+{
+	uint8_t ret;
+
+	switch (mbxCommand) {
+	case MBX_LOAD_SM:
+	case MBX_READ_NV:
+	case MBX_WRITE_NV:
+	case MBX_WRITE_VPARMS:
+	case MBX_RUN_BIU_DIAG:
+	case MBX_INIT_LINK:
+	case MBX_DOWN_LINK:
+	case MBX_CONFIG_LINK:
+	case MBX_CONFIG_RING:
+	case MBX_RESET_RING:
+	case MBX_READ_CONFIG:
+	case MBX_READ_RCONFIG:
+	case MBX_READ_SPARM:
+	case MBX_READ_STATUS:
+	case MBX_READ_RPI:
+	case MBX_READ_XRI:
+	case MBX_READ_REV:
+	case MBX_READ_LNK_STAT:
+	case MBX_REG_LOGIN:
+	case MBX_UNREG_LOGIN:
+	case MBX_CLEAR_LA:
+	case MBX_DUMP_MEMORY:
+	case MBX_DUMP_CONTEXT:
+	case MBX_RUN_DIAGS:
+	case MBX_RESTART:
+	case MBX_UPDATE_CFG:
+	case MBX_DOWN_LOAD:
+	case MBX_DEL_LD_ENTRY:
+	case MBX_RUN_PROGRAM:
+	case MBX_SET_MASK:
+	case MBX_SET_VARIABLE:
+	case MBX_UNREG_D_ID:
+	case MBX_KILL_BOARD:
+	case MBX_CONFIG_FARP:
+	case MBX_BEACON:
+	case MBX_LOAD_AREA:
+	case MBX_RUN_BIU_DIAG64:
+	case MBX_CONFIG_PORT:
+	case MBX_READ_SPARM64:
+	case MBX_READ_RPI64:
+	case MBX_REG_LOGIN64:
+	case MBX_READ_TOPOLOGY:
+	case MBX_WRITE_WWN:
+	case MBX_SET_DEBUG:
+	case MBX_LOAD_EXP_ROM:
+	case MBX_ASYNCEVT_ENABLE:
+	case MBX_REG_VPI:
+	case MBX_UNREG_VPI:
+	case MBX_HEARTBEAT:
+	case MBX_PORT_CAPABILITIES:
+	case MBX_PORT_IOV_CONTROL:
+	case MBX_SLI4_CONFIG:
+	case MBX_SLI4_REQ_FTRS:
+	case MBX_REG_FCFI:
+	case MBX_UNREG_FCFI:
+	case MBX_REG_VFI:
+	case MBX_UNREG_VFI:
+	case MBX_INIT_VPI:
+	case MBX_INIT_VFI:
+	case MBX_RESUME_RPI:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_READ_EVENT_LOG:
+	case MBX_SECURITY_MGMT:
+	case MBX_AUTH_PORT:
+	case MBX_ACCESS_VDATA:
+		ret = mbxCommand;
+		break;
+	default:
+		ret = MBX_SHUTDOWN;
+		break;
+	}
+	return ret;
+}
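+
+/*
+ * Usage sketch (illustrative): lpfc_sli_handle_mb_event() below uses
+ * this as a sanity filter before running completion handlers:
+ *
+ *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
+ *		treat as fatal: set LPFC_HBA_ERROR and run the
+ *		error attention handler
+ *	}
+ *
+ * Any command code missing from the switch above is treated as mailbox
+ * corruption rather than a recoverable error.
+ */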
+
+/**
+ * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox commands issued from
+ * lpfc_sli_issue_mbox_wait function. This function is called by the
+ * mailbox event handler function with no lock held. This function
+ * will wake up the thread waiting on the wait queue pointed to by context1
+ * of the mailbox.
+ **/
+void
+lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	wait_queue_head_t *pdone_q;
+	unsigned long drvr_flag;
+
+	/*
+	 * If pdone_q is empty, the driver thread gave up waiting and
+	 * continued running.
+	 */
+	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	pdone_q = (wait_queue_head_t *) pmboxq->context1;
+	if (pdone_q)
+		wake_up_interruptible(pdone_q);
+	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+	return;
+}
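+
+/*
+ * Protocol sketch (illustrative, mirroring lpfc_sli_issue_mbox_wait):
+ * the waiting side parks a wait queue head in context1 and sleeps until
+ * this handler sets LPFC_MBX_WAKE:
+ *
+ *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+ *	pmboxq->context1 = &done_q;
+ *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+ *	... issue the mailbox with MBX_NOWAIT ...
+ *	wait_event_interruptible_timeout(done_q,
+ *			pmboxq->mbox_flag & LPFC_MBX_WAKE, timeout);
+ *
+ * Reading context1 under the hbalock serializes this handler against a
+ * waiter that has timed out and is tearing down its wait queue.
+ */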
+
+/**
+ * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the default mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. If the completed command is a REG_LOGIN mailbox command,
+ * this function will issue an UNREG_LOGIN to reclaim the RPI.
+ **/
+void
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	uint16_t rpi, vpi;
+	int rc;
+
+	mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+	if (mp) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+
+	/*
+	 * If a REG_LOGIN succeeded after the node was destroyed or the node
+	 * is in re-discovery, the driver needs to clean up the RPI.
+	 */
+	if (!(phba->pport->load_flag & FC_UNLOADING) &&
+	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+	    !pmb->u.mb.mbxStatus) {
+		rpi = pmb->u.mb.un.varWords[0];
+		vpi = pmb->u.mb.un.varRegLogin.vpi;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
+		lpfc_unreg_login(phba, vpi, rpi, pmb);
+		pmb->vport = vport;
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc != MBX_NOT_FINISHED)
+			return;
+	}
+
+	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+		!(phba->pport->load_flag & FC_UNLOADING) &&
+		!pmb->u.mb.mbxStatus) {
+		shost = lpfc_shost_from_vport(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->vpi_state |= LPFC_VPI_REGISTERED;
+		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+		ndlp = (struct lpfc_nodelist *)pmb->context2;
+		lpfc_nlp_put(ndlp);
+		pmb->context2 = NULL;
+	}
+
+	/* Check security permission status on INIT_LINK mailbox command */
+	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
+	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"2860 SLI authentication is required "
+				"for INIT_LINK but has not done yet\n");
+
+	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+		lpfc_sli4_mbox_cmd_free(phba, pmb);
+	else
+		mempool_free(pmb, phba->mbox_mem_pool);
+}
+/**
+ * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the unreg rpi mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. An additional reference is put on the ndlp to prevent
+ * lpfc_nlp_release from freeing the rpi bit in the bitmask before
+ * the unreg mailbox command completes; this routine puts that
+ * reference back.
+ *
+ **/
+void
+lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = pmb->context1;
+	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+		    (bf_get(lpfc_sli_intf_if_type,
+		     &phba->sli4_hba.sli_intf) ==
+		     LPFC_SLI_INTF_IF_TYPE_2)) {
+			if (ndlp) {
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+						 "0010 UNREG_LOGIN vpi:%x "
+						 "rpi:%x DID:%x map:%x %p\n",
+						 vport->vpi, ndlp->nlp_rpi,
+						 ndlp->nlp_DID,
+						 ndlp->nlp_usg_map, ndlp);
+				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+				lpfc_nlp_put(ndlp);
+			}
+		}
+	}
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the completed mailbox commands and gives it to upper layers. The interrupt
+ * service routine processes mailbox completion interrupt and adds completed
+ * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
+ * The worker thread calls lpfc_sli_handle_mb_event, which will return the
+ * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
+ * function returns the mailbox commands to the upper layer by calling the
+ * completion handler function of each mailbox.
+ **/
+int
+lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
+{
+	MAILBOX_t *pmbox;
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+	LIST_HEAD(cmplq);
+
+	phba->sli.slistat.mbox_event++;
+
+	/* Get all completed mailbox buffers into the cmplq */
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Get a Mailbox buffer to setup mailbox commands for callback */
+	do {
+		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
+		if (pmb == NULL)
+			break;
+
+		pmbox = &pmb->u.mb;
+
+		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
+			if (pmb->vport) {
+				lpfc_debugfs_disc_trc(pmb->vport,
+					LPFC_DISC_TRC_MBOX_VPORT,
+					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
+					(uint32_t)pmbox->mbxCommand,
+					pmbox->un.varWords[0],
+					pmbox->un.varWords[1]);
+			} else {
+				lpfc_debugfs_disc_trc(phba->pport,
+					LPFC_DISC_TRC_MBOX,
+					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
+					(uint32_t)pmbox->mbxCommand,
+					pmbox->un.varWords[0],
+					pmbox->un.varWords[1]);
+			}
+		}
+
+		/*
+		 * It is a fatal error if an unknown mailbox command completes.
+		 */
+		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
+		    MBX_SHUTDOWN) {
+			/* Unknown mailbox command compl */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):0323 Unknown Mailbox command "
+					"x%x (x%x/x%x) Cmpl\n",
+					pmb->vport ? pmb->vport->vpi : 0,
+					pmbox->mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									pmb),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									pmb));
+			phba->link_state = LPFC_HBA_ERROR;
+			phba->work_hs = HS_FFER3;
+			lpfc_handle_eratt(phba);
+			continue;
+		}
+
+		if (pmbox->mbxStatus) {
+			phba->sli.slistat.mbox_stat_err++;
+			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
+				/* Mbox cmd cmpl error - RETRYing */
+				lpfc_printf_log(phba, KERN_INFO,
+					LOG_MBOX | LOG_SLI,
+					"(%d):0305 Mbox cmd cmpl "
+					"error - RETRYing Data: x%x "
+					"(x%x/x%x) x%x x%x x%x\n",
+					pmb->vport ? pmb->vport->vpi : 0,
+					pmbox->mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									pmb),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									pmb),
+					pmbox->mbxStatus,
+					pmbox->un.varWords[0],
+					pmb->vport->port_state);
+				pmbox->mbxStatus = 0;
+				pmbox->mbxOwner = OWN_HOST;
+				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+				if (rc != MBX_NOT_FINISHED)
+					continue;
+			}
+		}
+
+		/* Mailbox cmd <cmd> Cmpl <cmpl> */
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+				"x%x x%x x%x\n",
+				pmb->vport ? pmb->vport->vpi : 0,
+				pmbox->mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, pmb),
+				lpfc_sli_config_mbox_opcode_get(phba, pmb),
+				pmb->mbox_cmpl,
+				*((uint32_t *) pmbox),
+				pmbox->un.varWords[0],
+				pmbox->un.varWords[1],
+				pmbox->un.varWords[2],
+				pmbox->un.varWords[3],
+				pmbox->un.varWords[4],
+				pmbox->un.varWords[5],
+				pmbox->un.varWords[6],
+				pmbox->un.varWords[7],
+				pmbox->un.varWords[8],
+				pmbox->un.varWords[9],
+				pmbox->un.varWords[10]);
+
+		if (pmb->mbox_cmpl)
+			pmb->mbox_cmpl(phba, pmb);
+	} while (1);
+	return 0;
+}
+
+/**
+ * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: buffer tag.
+ *
+ * This function is called with no lock held. When the QUE_BUFTAG_BIT
+ * is set in the tag, the buffer was posted for a particular exchange
+ * and the function returns the buffer without posting a replacement.
+ * If the buffer is for unsolicited ELS or CT traffic, this function
+ * returns the buffer and also posts another buffer to the firmware.
+ **/
+static struct lpfc_dmabuf *
+lpfc_sli_get_buff(struct lpfc_hba *phba,
+		  struct lpfc_sli_ring *pring,
+		  uint32_t tag)
+{
+	struct hbq_dmabuf *hbq_entry;
+
+	if (tag & QUE_BUFTAG_BIT)
+		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
+	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+	if (!hbq_entry)
+		return NULL;
+	return &hbq_entry->dbuf;
+}
+
+/**
+ * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
+ * @fch_r_ctl: the r_ctl for the first frame of the sequence.
+ * @fch_type: the type for the first frame of the sequence.
+ *
+ * This function is called with no lock held. This function uses the r_ctl and
+ * type of the received sequence to find the correct callback function to call
+ * to process the sequence.
+ **/
+static int
+lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
+			 uint32_t fch_type)
+{
+	int i;
+
+	switch (fch_type) {
+	case FC_TYPE_NVME:
+		lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
+		return 1;
+	default:
+		break;
+	}
+
+	/* Unsolicited responses */
+	if (pring->prt[0].profile) {
+		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
+			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
+									saveq);
+		return 1;
+	}
+	/* We must search, based on rctl / type, for the right routine */
+	for (i = 0; i < pring->num_mask; i++) {
+		if ((pring->prt[i].rctl == fch_r_ctl) &&
+		    (pring->prt[i].type == fch_type)) {
+			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
+				(pring->prt[i].lpfc_sli_rcv_unsol_event)
+						(phba, pring, saveq);
+			return 1;
+		}
+	}
+	return 0;
+}
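+
+/*
+ * Dispatch sketch (illustrative): the prt[] masks consulted above are
+ * filled in at ring-setup time; an entry routing ELS frames looks like:
+ *
+ *	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+ *	pring->prt[0].type = FC_TYPE_ELS;
+ *	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
+ *
+ * A profile of zero means match on rctl/type; a nonzero profile in
+ * slot 0 short-circuits the search and claims every sequence.
+ */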
+
+/**
+ * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the unsolicited iocb.
+ *
+ * This function is called with no lock held by the ring event handler
+ * when there is an unsolicited iocb posted to the response ring by the
+ * firmware. This function gets the buffer associated with the iocbs
+ * and calls the event handler for the ring. This function handles both
+ * qring buffers and hbq buffers.
+ * When the function returns 1 the caller can free the iocb object otherwise
+ * upper layer functions will free the iocb objects.
+ **/
+static int
+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			    struct lpfc_iocbq *saveq)
+{
+	IOCB_t           * irsp;
+	WORD5            * w5p;
+	uint32_t           Rctl, Type;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_dmabuf *dmzbuf;
+
+	irsp = &(saveq->iocb);
+
+	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
+		if (pring->lpfc_sli_rcv_async_status)
+			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
+		else
+			lpfc_printf_log(phba,
+					KERN_WARNING,
+					LOG_SLI,
+					"0316 Ring %d handler: unexpected "
+					"ASYNC_STATUS iocb received evt_code "
+					"0x%x\n",
+					pring->ringno,
+					irsp->un.asyncstat.evt_code);
+		return 1;
+	}
+
+	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
+		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+		if (irsp->ulpBdeCount > 0) {
+			dmzbuf = lpfc_sli_get_buff(phba, pring,
+					irsp->un.ulpWord[3]);
+			lpfc_in_buf_free(phba, dmzbuf);
+		}
+
+		if (irsp->ulpBdeCount > 1) {
+			dmzbuf = lpfc_sli_get_buff(phba, pring,
+					irsp->unsli3.sli3Words[3]);
+			lpfc_in_buf_free(phba, dmzbuf);
+		}
+
+		if (irsp->ulpBdeCount > 2) {
+			dmzbuf = lpfc_sli_get_buff(phba, pring,
+				irsp->unsli3.sli3Words[7]);
+			lpfc_in_buf_free(phba, dmzbuf);
+		}
+
+		return 1;
+	}
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		if (irsp->ulpBdeCount != 0) {
+			saveq->context2 = lpfc_sli_get_buff(phba, pring,
+						irsp->un.ulpWord[3]);
+			if (!saveq->context2)
+				lpfc_printf_log(phba,
+					KERN_ERR,
+					LOG_SLI,
+					"0341 Ring %d Cannot find buffer for "
+					"an unsolicited iocb. tag 0x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[3]);
+		}
+		if (irsp->ulpBdeCount == 2) {
+			saveq->context3 = lpfc_sli_get_buff(phba, pring,
+						irsp->unsli3.sli3Words[7]);
+			if (!saveq->context3)
+				lpfc_printf_log(phba,
+					KERN_ERR,
+					LOG_SLI,
+					"0342 Ring %d Cannot find buffer for an"
+					" unsolicited iocb. tag 0x%x\n",
+					pring->ringno,
+					irsp->unsli3.sli3Words[7]);
+		}
+		list_for_each_entry(iocbq, &saveq->list, list) {
+			irsp = &(iocbq->iocb);
+			if (irsp->ulpBdeCount != 0) {
+				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+							irsp->un.ulpWord[3]);
+				if (!iocbq->context2)
+					lpfc_printf_log(phba,
+						KERN_ERR,
+						LOG_SLI,
+						"0343 Ring %d Cannot find "
+						"buffer for an unsolicited iocb"
+						". tag 0x%x\n", pring->ringno,
+						irsp->un.ulpWord[3]);
+			}
+			if (irsp->ulpBdeCount == 2) {
+				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+						irsp->unsli3.sli3Words[7]);
+				if (!iocbq->context3)
+					lpfc_printf_log(phba,
+						KERN_ERR,
+						LOG_SLI,
+						"0344 Ring %d Cannot find "
+						"buffer for an unsolicited "
+						"iocb. tag 0x%x\n",
+						pring->ringno,
+						irsp->unsli3.sli3Words[7]);
+			}
+		}
+	}
+	if (irsp->ulpBdeCount != 0 &&
+	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
+	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
+		int found = 0;
+
+		/* search continue save q for same XRI */
+		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
+			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
+				saveq->iocb.unsli3.rcvsli3.ox_id) {
+				list_add_tail(&saveq->list, &iocbq->list);
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			list_add_tail(&saveq->clist,
+				      &pring->iocb_continue_saveq);
+		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
+			list_del_init(&iocbq->clist);
+			saveq = iocbq;
+			irsp = &(saveq->iocb);
+		} else
+			return 0;
+	}
+	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
+	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
+	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
+		Rctl = FC_RCTL_ELS_REQ;
+		Type = FC_TYPE_ELS;
+	} else {
+		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
+		Rctl = w5p->hcsw.Rctl;
+		Type = w5p->hcsw.Type;
+
+		/* Firmware Workaround */
+		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
+			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
+			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+			Rctl = FC_RCTL_ELS_REQ;
+			Type = FC_TYPE_ELS;
+			w5p->hcsw.Rctl = Rctl;
+			w5p->hcsw.Type = Type;
+		}
+	}
+
+	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0313 Ring %d handler: unexpected Rctl x%x "
+				"Type x%x received\n",
+				pring->ringno, Rctl, Type);
+
+	return 1;
+}
+
+/**
+ * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @prspiocb: Pointer to response iocb object.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given response iocb using the iotag of the
+ * response iocb. This function is called with the hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
+		      struct lpfc_sli_ring *pring,
+		      struct lpfc_iocbq *prspiocb)
+{
+	struct lpfc_iocbq *cmd_iocb = NULL;
+	uint16_t iotag;
+	lockdep_assert_held(&phba->hbalock);
+
+	iotag = prspiocb->iocb.ulpIoTag;
+
+	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+		cmd_iocb = phba->sli.iocbq_lookup[iotag];
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+			/* remove from txcmpl queue list */
+			list_del_init(&cmd_iocb->list);
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+			return cmd_iocb;
+		}
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0317 iotag x%x is out of "
+			"range: max iotag x%x wd0 x%x\n",
+			iotag, phba->sli.last_iotag,
+			*(((uint32_t *) &prspiocb->iocb) + 7));
+	return NULL;
+}
+
+/**
+ * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iotag: IOCB tag.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given iotag. This function is called with the
+ * hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
+			     struct lpfc_sli_ring *pring, uint16_t iotag)
+{
+	struct lpfc_iocbq *cmd_iocb = NULL;
+
+	lockdep_assert_held(&phba->hbalock);
+	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+		cmd_iocb = phba->sli.iocbq_lookup[iotag];
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+			/* remove from txcmpl queue list */
+			list_del_init(&cmd_iocb->list);
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+			return cmd_iocb;
+		}
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0372 iotag x%x lookup error: max iotag (x%x) "
+			"iocb_flag x%x\n",
+			iotag, phba->sli.last_iotag,
+			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
+	return NULL;
+}
+
+/**
+ * lpfc_sli_process_sol_iocb - process solicited iocb completion
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the response iocb to be processed.
+ *
+ * This function is called by the ring event handler for non-fcp
+ * rings when there is a new response iocb in the response ring.
+ * The caller is not required to hold any locks. This function
+ * gets the command iocb associated with the response iocb and
+ * calls the completion handler for the command iocb. If there
+ * is no completion handler, the function will free the resources
+ * associated with command iocb. If the response iocb is for
+ * an already aborted command iocb, the status of the completion
+ * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
+ * This function always returns 1.
+ **/
+static int
+lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			  struct lpfc_iocbq *saveq)
+{
+	struct lpfc_iocbq *cmdiocbp;
+	int rc = 1;
+	unsigned long iflag;
+
+	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	if (cmdiocbp) {
+		if (cmdiocbp->iocb_cmpl) {
+			/*
+			 * If an ELS command failed send an event to mgmt
+			 * application.
+			 */
+			if (saveq->iocb.ulpStatus &&
+			     (pring->ringno == LPFC_ELS_RING) &&
+			     (cmdiocbp->iocb.ulpCommand ==
+				CMD_ELS_REQUEST64_CR))
+				lpfc_send_els_failure_event(phba,
+					cmdiocbp, saveq);
+
+			/*
+			 * Post all ELS completions to the worker thread.
+			 * All other are passed to the completion callback.
+			 */
+			if (pring->ringno == LPFC_ELS_RING) {
+				if ((phba->sli_rev < LPFC_SLI_REV4) &&
+				    (cmdiocbp->iocb_flag &
+							LPFC_DRIVER_ABORTED)) {
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+					cmdiocbp->iocb_flag &=
+						~LPFC_DRIVER_ABORTED;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+					saveq->iocb.ulpStatus =
+						IOSTAT_LOCAL_REJECT;
+					saveq->iocb.un.ulpWord[4] =
+						IOERR_SLI_ABORTED;
+
+					/* Firmware could still be in progress
+					 * of DMAing payload, so don't free data
+					 * buffer till after a hbeat.
+					 */
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+				}
+				if (phba->sli_rev == LPFC_SLI_REV4) {
+					if (saveq->iocb_flag &
+					    LPFC_EXCHANGE_BUSY) {
+						/* Set cmdiocb flag for the
+						 * exchange busy so sgl (xri)
+						 * will not be released until
+						 * the abort xri is received
+						 * from hba.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag |=
+							LPFC_EXCHANGE_BUSY;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+					if (cmdiocbp->iocb_flag &
+					    LPFC_DRIVER_ABORTED) {
+						/*
+						 * Clear LPFC_DRIVER_ABORTED
+						 * bit in case it was driver
+						 * initiated abort.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag &=
+							~LPFC_DRIVER_ABORTED;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						cmdiocbp->iocb.un.ulpWord[4] =
+							IOERR_ABORT_REQUESTED;
+						/*
+						 * For SLI4, irsiocb contains
+						 * NO_XRI in sli_xritag, it
+						 * shall not affect releasing
+						 * sgl (xri) process.
+						 */
+						saveq->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						saveq->iocb.un.ulpWord[4] =
+							IOERR_SLI_ABORTED;
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						saveq->iocb_flag |=
+							LPFC_DELAY_MEM_FREE;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+				}
+			}
+			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+		} else
+			lpfc_sli_release_iocbq(phba, cmdiocbp);
+	} else {
+		/*
+		 * Unknown initiating command based on the response iotag.
+		 * This could be the case on the ELS ring because of
+		 * lpfc_els_abort().
+		 */
+		if (pring->ringno != LPFC_ELS_RING) {
+			/*
+			 * Ring <ringno> handler: unexpected completion IoTag
+			 * <IoTag>
+			 */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					 "0322 Ring %d handler: "
+					 "unexpected completion IoTag x%x "
+					 "Data: x%x x%x x%x x%x\n",
+					 pring->ringno,
+					 saveq->iocb.ulpIoTag,
+					 saveq->iocb.ulpStatus,
+					 saveq->iocb.un.ulpWord[4],
+					 saveq->iocb.ulpCommand,
+					 saveq->iocb.ulpContext);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called from the iocb ring event handlers when
+ * put pointer is ahead of the get pointer for a ring. This function signals
+ * an error attention condition to the worker thread and the worker
+ * thread will transition the HBA to offline state.
+ **/
+static void
+lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+	/*
+	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+	 * rsp ring <portRspMax>
+	 */
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0312 Ring %d handler: portRspPut %d "
+			"is bigger than rsp ring %d\n",
+			pring->ringno, le32_to_cpu(pgp->rspPutInx),
+			pring->sli.sli3.numRiocb);
+
+	phba->link_state = LPFC_HBA_ERROR;
+
+	/*
+	 * All error attention handlers are posted to
+	 * worker thread
+	 */
+	phba->work_ha |= HA_ERATT;
+	phba->work_hs = HS_FFER3;
+
+	lpfc_worker_wake_up(phba);
+
+	return;
+}
+
+/**
+ * lpfc_poll_eratt - Error attention polling timer timeout handler
+ * @ptr: Pointer to address of HBA context object.
+ *
+ * This function is invoked by the Error Attention polling timer when the
+ * timer times out. It will check the SLI Error Attention register for
+ * possible attention events. If so, it will post an Error Attention event
+ * and wake up worker thread to process it. Otherwise, it will set up the
+ * Error Attention polling timer for the next poll.
+ **/
+void lpfc_poll_eratt(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	uint32_t eratt = 0;
+	uint64_t sli_intr, cnt;
+
+	phba = (struct lpfc_hba *)ptr;
+
+	/* Here we will also keep track of interrupts per sec of the hba */
+	sli_intr = phba->sli.slistat.sli_intr;
+
+	if (phba->sli.slistat.sli_prev_intr > sli_intr)
+		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
+			sli_intr);
+	else
+		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
+
+	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
+	do_div(cnt, phba->eratt_poll_interval);
+	phba->sli.slistat.sli_ips = cnt;
+
+	phba->sli.slistat.sli_prev_intr = sli_intr;
+
+	/* Check chip HA register for error event */
+	eratt = lpfc_sli_check_eratt(phba);
+
+	if (eratt)
+		/* Tell the worker thread there is work to do */
+		lpfc_worker_wake_up(phba);
+	else
+		/* Restart the timer for next eratt poll */
+		mod_timer(&phba->eratt_poll,
+			  jiffies +
+			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+	return;
+}
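+
+/*
+ * Worked example (illustrative): suppose sli_prev_intr was sampled near
+ * the top of the 64-bit range, say (uint64_t)(-1) - 1, and the counter
+ * has since wrapped to 1. The wrap branch above computes
+ *
+ *	cnt = ((uint64_t)(-1) - ((uint64_t)(-1) - 1)) + 1 = 2
+ *
+ * and do_div(cnt, eratt_poll_interval) then converts that delta into
+ * interrupts per second, since a plain "/" on a u64 is not available
+ * on 32-bit kernels.
+ */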
+
+/**
+ * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the interrupt context when there is a ring
+ * event for the fcp ring. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
+ * LE bit set. The function will call the completion handler of the command iocb
+ * if the response iocb indicates a completion for a command iocb or it is
+ * an abort completion. The function will call lpfc_sli_process_unsol_iocb
+ * function if this is an unsolicited iocb.
+ * This routine presumes LPFC_FCP_RING handling and doesn't bother
+ * to check it explicitly.
+ **/
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
+				struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+	IOCB_t *irsp = NULL;
+	IOCB_t *entry = NULL;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+	struct lpfc_iocbq rspiocbq;
+	uint32_t status;
+	uint32_t portRspPut, portRspMax;
+	int rc = 1;
+	lpfc_iocb_type type;
+	unsigned long iflag;
+	uint32_t rsp_cmpl = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	pring->stats.iocb_event++;
+
+	/*
+	 * The next available response entry should never exceed the maximum
+	 * entries.  If it does, treat it as an adapter hardware error.
+	 */
+	portRspMax = pring->sli.sli3.numRiocb;
+	portRspPut = le32_to_cpu(pgp->rspPutInx);
+	if (unlikely(portRspPut >= portRspMax)) {
+		lpfc_sli_rsp_pointers_error(phba, pring);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	}
+	if (phba->fcp_ring_in_use) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	} else
+		phba->fcp_ring_in_use = 1;
+
+	rmb();
+	while (pring->sli.sli3.rspidx != portRspPut) {
+		/*
+		 * Fetch an entry off the ring and copy it into a local data
+		 * structure.  The copy involves a byte-swap since the
+		 * network byte order and pci byte orders are different.
+		 */
+		entry = lpfc_resp_iocb(phba, pring);
+		phba->last_completion_time = jiffies;
+
+		if (++pring->sli.sli3.rspidx >= portRspMax)
+			pring->sli.sli3.rspidx = 0;
+
+		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
+				      (uint32_t *) &rspiocbq.iocb,
+				      phba->iocb_rsp_size);
+		INIT_LIST_HEAD(&(rspiocbq.list));
+		irsp = &rspiocbq.iocb;
+
+		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
+		pring->stats.iocb_rsp++;
+		rsp_cmpl++;
+
+		if (unlikely(irsp->ulpStatus)) {
+			/*
+			 * If resource errors reported from HBA, reduce
+			 * queuedepths of the SCSI device.
+			 */
+			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			     IOERR_NO_RESOURCES)) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				phba->lpfc_rampdown_queue_depth(phba);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
+
+			/* Rsp ring <ringno> error: IOCB */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0336 Rsp Ring %d error: IOCB Data: "
+					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[0],
+					irsp->un.ulpWord[1],
+					irsp->un.ulpWord[2],
+					irsp->un.ulpWord[3],
+					irsp->un.ulpWord[4],
+					irsp->un.ulpWord[5],
+					*(uint32_t *)&irsp->un1,
+					*((uint32_t *)&irsp->un1 + 1));
+		}
+
+		switch (type) {
+		case LPFC_ABORT_IOCB:
+		case LPFC_SOL_IOCB:
+			/*
+			 * Idle exchange closed via ABTS from port.  No iocb
+			 * resources need to be recovered.
+			 */
+			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+						"0333 IOCB cmd 0x%x"
+						" processed. Skipping"
+						" completion\n",
+						irsp->ulpCommand);
+				break;
+			}
+
+			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
+							 &rspiocbq);
+			if (unlikely(!cmdiocbq))
+				break;
+			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+			if (cmdiocbq->iocb_cmpl) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+						      &rspiocbq);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
+			break;
+		case LPFC_UNSOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			break;
+		default:
+			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+				char adaptermsg[LPFC_MAX_ADPTMSG];
+				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+				memcpy(&adaptermsg[0], (uint8_t *) irsp,
+				       MAX_MSG_DATA);
+				dev_warn(&((phba->pcidev)->dev),
+					 "lpfc%d: %s\n",
+					 phba->brd_no, adaptermsg);
+			} else {
+				/* Unknown IOCB command */
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0334 Unknown IOCB command "
+						"Data: x%x, x%x x%x x%x x%x\n",
+						type, irsp->ulpCommand,
+						irsp->ulpStatus,
+						irsp->ulpIoTag,
+						irsp->ulpContext);
+			}
+			break;
+		}
+
+		/*
+		 * The response IOCB has been processed.  Update the ring
+		 * pointer in SLIM.  If the port response put pointer has not
+		 * been updated, sync the pgp->rspPutInx and fetch the new port
+		 * response put pointer.
+		 */
+		writel(pring->sli.sli3.rspidx,
+			&phba->host_gp[pring->ringno].rspGetInx);
+
+		if (pring->sli.sli3.rspidx == portRspPut)
+			portRspPut = le32_to_cpu(pgp->rspPutInx);
+	}
+
+	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
+		pring->stats.iocb_rsp_full++;
+		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+		writel(status, phba->CAregaddr);
+		readl(phba->CAregaddr);
+	}
+	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+		pring->stats.iocb_cmd_empty++;
+
+		/* Force update of the local copy of cmdGetInx */
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		lpfc_sli_resume_iocb(phba, pring);
+
+		if ((pring->lpfc_sli_cmd_available))
+			(pring->lpfc_sli_cmd_available) (phba, pring);
+
+	}
+
+	phba->fcp_ring_in_use = 0;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rc;
+}
+
+/**
+ * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @rspiocbp: Pointer to driver response IOCB object.
+ *
+ * This function is called from the worker thread when there is a slow-path
+ * response IOCB to process. This function chains all the response iocbs until
+ * seeing the iocb with the LE bit set. The function will call
+ * lpfc_sli_process_sol_iocb function if the response iocb indicates a
+ * completion of a command iocb. The function will call the
+ * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
+ * The function frees the resources or calls the completion handler if this
+ * iocb is an abort completion. The function returns NULL when the response
+ * iocb has the LE bit set and all the chained iocbs are processed, otherwise
+ * this function shall chain the iocb on to the iocb_continueq and return the
+ * response iocb passed in.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *rspiocbp)
+{
+	struct lpfc_iocbq *saveq;
+	struct lpfc_iocbq *cmdiocbp;
+	struct lpfc_iocbq *next_iocb;
+	IOCB_t *irsp = NULL;
+	uint32_t free_saveq;
+	uint8_t iocb_cmd_type;
+	lpfc_iocb_type type;
+	unsigned long iflag;
+	int rc;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* First add the response iocb to the continueq list */
+	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+	pring->iocb_continueq_cnt++;
+
+	/* Now, determine whether the list is completed for processing */
+	irsp = &rspiocbp->iocb;
+	if (irsp->ulpLe) {
+		/*
+		 * By default, the driver expects to free all resources
+		 * associated with this iocb completion.
+		 */
+		free_saveq = 1;
+		saveq = list_get_first(&pring->iocb_continueq,
+				       struct lpfc_iocbq, list);
+		irsp = &(saveq->iocb);
+		list_del_init(&pring->iocb_continueq);
+		pring->iocb_continueq_cnt = 0;
+
+		pring->stats.iocb_rsp++;
+
+		/*
+		 * If resource errors reported from HBA, reduce
+		 * queuedepths of the SCSI device.
+		 */
+		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+		     IOERR_NO_RESOURCES)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			phba->lpfc_rampdown_queue_depth(phba);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+		}
+
+		if (irsp->ulpStatus) {
+			/* Rsp ring <ringno> error: IOCB */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0328 Rsp Ring %d error: "
+					"IOCB Data: "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[0],
+					irsp->un.ulpWord[1],
+					irsp->un.ulpWord[2],
+					irsp->un.ulpWord[3],
+					irsp->un.ulpWord[4],
+					irsp->un.ulpWord[5],
+					*(((uint32_t *) irsp) + 6),
+					*(((uint32_t *) irsp) + 7),
+					*(((uint32_t *) irsp) + 8),
+					*(((uint32_t *) irsp) + 9),
+					*(((uint32_t *) irsp) + 10),
+					*(((uint32_t *) irsp) + 11),
+					*(((uint32_t *) irsp) + 12),
+					*(((uint32_t *) irsp) + 13),
+					*(((uint32_t *) irsp) + 14),
+					*(((uint32_t *) irsp) + 15));
+		}
+
+		/*
+		 * Fetch the IOCB command type and call the correct completion
+		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
+		 * get freed back to the lpfc_iocb_list by the discovery
+		 * kernel thread.
+		 */
+		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+		switch (type) {
+		case LPFC_SOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			break;
+
+		case LPFC_UNSOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			if (!rc)
+				free_saveq = 0;
+			break;
+
+		case LPFC_ABORT_IOCB:
+			cmdiocbp = NULL;
+			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
+								 saveq);
+			if (cmdiocbp) {
+				/* Call the specified completion routine */
+				if (cmdiocbp->iocb_cmpl) {
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
+							      saveq);
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+				} else
+					__lpfc_sli_release_iocbq(phba,
+								 cmdiocbp);
+			}
+			break;
+
+		case LPFC_UNKNOWN_IOCB:
+			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+				char adaptermsg[LPFC_MAX_ADPTMSG];
+				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+				memcpy(&adaptermsg[0], (uint8_t *)irsp,
+				       MAX_MSG_DATA);
+				dev_warn(&((phba->pcidev)->dev),
+					 "lpfc%d: %s\n",
+					 phba->brd_no, adaptermsg);
+			} else {
+				/* Unknown IOCB command */
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0335 Unknown IOCB "
+						"command Data: x%x "
+						"x%x x%x x%x\n",
+						irsp->ulpCommand,
+						irsp->ulpStatus,
+						irsp->ulpIoTag,
+						irsp->ulpContext);
+			}
+			break;
+		}
+
+		if (free_saveq) {
+			list_for_each_entry_safe(rspiocbp, next_iocb,
+						 &saveq->list, list) {
+				list_del_init(&rspiocbp->list);
+				__lpfc_sli_release_iocbq(phba, rspiocbp);
+			}
+			__lpfc_sli_release_iocbq(phba, saveq);
+		}
+		rspiocbp = NULL;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rspiocbp;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This routine wraps the actual slow_ring event process routine from the
+ * API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
+				struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring event
+ * for non-fcp rings. The caller does not hold any lock. The function will
+ * remove each response iocb in the response ring and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_pgp *pgp;
+	IOCB_t *entry;
+	IOCB_t *irsp = NULL;
+	struct lpfc_iocbq *rspiocbp = NULL;
+	uint32_t portRspPut, portRspMax;
+	unsigned long iflag;
+	uint32_t status;
+
+	pgp = &phba->port_gp[pring->ringno];
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	pring->stats.iocb_event++;
+
+	/*
+	 * The next available response entry should never exceed the maximum
+	 * entries.  If it does, treat it as an adapter hardware error.
+	 */
+	portRspMax = pring->sli.sli3.numRiocb;
+	portRspPut = le32_to_cpu(pgp->rspPutInx);
+	if (portRspPut >= portRspMax) {
+		/*
+		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+		 * rsp ring <portRspMax>
+		 */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0303 Ring %d handler: portRspPut %d "
+				"is bigger than rsp ring %d\n",
+				pring->ringno, portRspPut, portRspMax);
+
+		phba->link_state = LPFC_HBA_ERROR;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		phba->work_hs = HS_FFER3;
+		lpfc_handle_eratt(phba);
+
+		return;
+	}
+
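+	/* Ensure the rspPutInx read completes before reading ring entries */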
+	rmb();
+	while (pring->sli.sli3.rspidx != portRspPut) {
+		/*
+		 * Build a completion list and call the appropriate handler.
+		 * The process is to get the next available response iocb, get
+		 * a free iocb from the list, copy the response data into the
+		 * free iocb, insert it into the continuation list, and update
+		 * the next response index to slim.  This process makes
+		 * response iocbs in the ring available to DMA as fast as
+		 * possible but pays a penalty for a copy operation.  Since
+		 * the iocb is only 32 bytes, this penalty is considered small
+		 * relative to the PCI reads for register values and a slim
+		 * write.  When the ulpLe field is set, the entire command has
+		 * been received.
+		 */
+		entry = lpfc_resp_iocb(phba, pring);
+
+		phba->last_completion_time = jiffies;
+		rspiocbp = __lpfc_sli_get_iocbq(phba);
+		if (rspiocbp == NULL) {
+			printk(KERN_ERR "%s: out of buffers! Failing "
+			       "completion.\n", __func__);
+			break;
+		}
+
+		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
+				      phba->iocb_rsp_size);
+		irsp = &rspiocbp->iocb;
+
+		if (++pring->sli.sli3.rspidx >= portRspMax)
+			pring->sli.sli3.rspidx = 0;
+
+		if (pring->ringno == LPFC_ELS_RING) {
+			lpfc_debugfs_slow_ring_trc(phba,
+			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
+				*(((uint32_t *) irsp) + 4),
+				*(((uint32_t *) irsp) + 6),
+				*(((uint32_t *) irsp) + 7));
+		}
+
+		writel(pring->sli.sli3.rspidx,
+			&phba->host_gp[pring->ringno].rspGetInx);
+
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		/* Handle the response IOCB */
+		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
+		spin_lock_irqsave(&phba->hbalock, iflag);
+
+		/*
+		 * If the port response put pointer has not been updated, sync
+		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
+		 * response put pointer.
+		 */
+		if (pring->sli.sli3.rspidx == portRspPut) {
+			portRspPut = le32_to_cpu(pgp->rspPutInx);
+		}
+	} /* while (pring->sli.sli3.rspidx != portRspPut) */
+
+	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
+		/* At least one response entry has been freed */
+		pring->stats.iocb_rsp_full++;
+		/* SET RxRE_RSP in Chip Att register */
+		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+		writel(status, phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+	}
+	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+		pring->stats.iocb_cmd_empty++;
+
+		/* Force update of the local copy of cmdGetInx */
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		lpfc_sli_resume_iocb(phba, pring);
+
+		if ((pring->lpfc_sli_cmd_available))
+			(pring->lpfc_sli_cmd_available) (phba, pring);
+
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function removes each
+ * response iocb from the response worker queue and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflag;
+	int count = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+		/* Get the response iocb from the head of work queue */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			/* Translate ELS WCQE to response IOCBQ */
+			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+								   irspiocbq);
+			if (irspiocbq)
+				lpfc_sli_sp_handle_rspiocb(phba, pring,
+							   irspiocbq);
+			count++;
+			break;
+		case CQE_CODE_RECEIVE:
+		case CQE_CODE_RECEIVE_V1:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			count++;
+			break;
+		default:
+			break;
+		}
+
+		/* Limit the number of events to 64 to avoid soft lockups */
+		if (count == 64)
+			break;
+	}
+}
+
+/**
+ * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	if (pring->ringno == LPFC_ELS_RING) {
+		lpfc_fabric_abort_hba(phba);
+	}
+
+	/* Error everything on txq and txcmplq
+	 * First do the txq.
+	 */
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		spin_lock_irq(&pring->ring_lock);
+		list_splice_init(&pring->txq, &completions);
+		pring->txq_cnt = 0;
+		spin_unlock_irq(&pring->ring_lock);
+
+		spin_lock_irq(&phba->hbalock);
+		/* Next issue ABTS for everything on the txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_unlock_irq(&phba->hbalock);
+	} else {
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&pring->txq, &completions);
+		pring->txq_cnt = 0;
+
+		/* Next issue ABTS for everything on the txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function issues an abort wqe for every outstanding command on the
+ * given ring's txcmplq. The wqes in the txcmplq are not guaranteed to
+ * complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+void
+lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	if (pring->ringno == LPFC_ELS_RING)
+		lpfc_fabric_abort_hba(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	/* Next issue ABTS for everything on the txcmplq */
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+		lpfc_sli4_abort_nvme_io(phba, pring, iocb);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+
+/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function aborts all iocbs in FCP rings and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+	uint32_t i;
+
+	/* Look on all the FCP Rings for the iotag */
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+			pring = phba->sli4_hba.fcp_wq[i]->pring;
+			lpfc_sli_abort_iocb_ring(phba, pring);
+		}
+	} else {
+		pring = &psli->sli3_ring[LPFC_FCP_RING];
+		lpfc_sli_abort_iocb_ring(phba, pring);
+	}
+}
+
+/**
+ * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function aborts all wqes in NVME rings. This function issues an
+ * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
+ * the txcmplq are not guaranteed to complete before the return of this
+ * function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring  *pring;
+	uint32_t i;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return;
+
+	/* Abort all IO on each NVME ring. */
+	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+		pring = phba->sli4_hba.nvme_wq[i]->pring;
+		lpfc_sli_abort_wqe_ring(phba, pring);
+	}
+}
+
+
+/**
+ * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all iocbs in the FCP rings and frees all the iocb
+ * objects in txq and txcmplq. This function will not issue abort iocbs
+ * for the iocb commands in txcmplq; they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+{
+	LIST_HEAD(txq);
+	LIST_HEAD(txcmplq);
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+	uint32_t i;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Indicate the I/O queues are flushed */
+	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Look on all the FCP Rings for the iotag */
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+			pring = phba->sli4_hba.fcp_wq[i]->pring;
+
+			spin_lock_irq(&pring->ring_lock);
+			/* Retrieve everything on txq */
+			list_splice_init(&pring->txq, &txq);
+			/* Retrieve everything on the txcmplq */
+			list_splice_init(&pring->txcmplq, &txcmplq);
+			pring->txq_cnt = 0;
+			pring->txcmplq_cnt = 0;
+			spin_unlock_irq(&pring->ring_lock);
+
+			/* Flush the txq */
+			lpfc_sli_cancel_iocbs(phba, &txq,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_DOWN);
+			/* Flush the txcmplq */
+			lpfc_sli_cancel_iocbs(phba, &txcmplq,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_DOWN);
+		}
+	} else {
+		pring = &psli->sli3_ring[LPFC_FCP_RING];
+
+		spin_lock_irq(&phba->hbalock);
+		/* Retrieve everything on txq */
+		list_splice_init(&pring->txq, &txq);
+		/* Retrieve everything on the txcmplq */
+		list_splice_init(&pring->txcmplq, &txcmplq);
+		pring->txq_cnt = 0;
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Flush the txq */
+		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+		/* Flush the txcmplq */
+		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+	}
+}
+
+/**
+ * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all wqes in the nvme rings and frees all resources
+ * in the txcmplq. This function does not issue abort wqes for the IO
+ * commands in txcmplq; they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
+{
+	LIST_HEAD(txcmplq);
+	struct lpfc_sli_ring  *pring;
+	uint32_t i;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return;
+
+	/* Hint to other driver operations that a flush is in progress. */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cycle through all NVME rings and complete each IO with
+	 * a local driver reason code.  This is a flush so no
+	 * abort exchange to FW.
+	 */
+	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+		pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+		/* Retrieve everything on the txcmplq */
+		spin_lock_irq(&pring->ring_lock);
+		list_splice_init(&pring->txcmplq, &txcmplq);
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&pring->ring_lock);
+
+		/* Flush the txcmplq */
+		lpfc_sli_cancel_iocbs(phba, &txcmplq,
+				      IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+	}
+}
+
+/**
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares it
+ * with the provided bit mask to check if the HBA completed
+ * the restart. It waits in a loop for the HBA to complete the
+ * restart, and if the HBA has not restarted by the 15th
+ * iteration, it resets the HBA again. The function returns 1
+ * when the HBA fails to restart, otherwise it returns zero.
+ **/
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int i = 0;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return 1;
+
+	/*
+	 * Check the status register every 10ms for 5 retries, then every
+	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
+	 * check every 2.5 sec for 5 more.
+	 * Break out of the loop if errors occurred during init.
+	 */
+	while (((status & mask) != mask) &&
+	       !(status & HS_FFERM) &&
+	       i++ < 20) {
+
+		if (i <= 5)
+			msleep(10);
+		else if (i <= 10)
+			msleep(500);
+		else
+			msleep(2500);
+
+		if (i == 15) {
+			/* Do post */
+			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+			lpfc_sli_brdrestart(phba);
+		}
+		/* Read the HBA Host Status Register */
+		if (lpfc_readl(phba->HSregaddr, &status)) {
+			retval = 1;
+			break;
+		}
+	}
+
+	/* Check to see if any errors occurred during init */
+	if ((status & HS_FFERM) || (i >= 20)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2751 Adapter failed to restart, "
+				"status reg x%x, FW Data: A8 x%x AC x%x\n",
+				status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	}
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to see if the HBA is
+ * ready. It waits in a loop for the HBA to become ready; if the HBA
+ * is not ready, the function resets the HBA PCI function again. The
+ * function returns 1 when the HBA fails to become ready, otherwise it
+ * returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	status = lpfc_sli4_post_status_check(phba);
+
+	if (status) {
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		lpfc_sli_brdrestart(phba);
+		status = lpfc_sli4_post_status_check(phba);
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status) {
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	} else
+		phba->sli4_hba.intr_enable = 0;
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking HBA readiness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine
+ * via the API jump table function pointer in the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+	return phba->lpfc_sli_brdready(phba, mask);
+}
+
+#define BARRIER_TEST_PATTERN (0xdeadbeef)
+
+/**
+ * lpfc_reset_barrier - Make HBA ready for HBA reset
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called, with hbalock held, before resetting an HBA;
+ * it requests the HBA to quiesce DMAs before the reset.
+ **/
+void lpfc_reset_barrier(struct lpfc_hba *phba)
+{
+	uint32_t __iomem *resp_buf;
+	uint32_t __iomem *mbox_buf;
+	volatile uint32_t mbox;
+	uint32_t hc_copy, ha_copy, resp_data;
+	int  i;
+	uint8_t hdrtype;
+
+	lockdep_assert_held(&phba->hbalock);
+
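+	/*
+	 * The DMA-quiesce barrier is only needed on multi-function
+	 * (PCI header type 0x80) Helios and Thor adapters; skip it
+	 * otherwise.
+	 */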
+	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
+	if (hdrtype != 0x80 ||
+	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
+	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
+		return;
+
+	/*
+	 * Tell the other part of the chip to suspend temporarily all
+	 * its DMA activity.
+	 */
+	resp_buf = phba->MBslimaddr;
+
+	/* Disable the error attention */
+	if (lpfc_readl(phba->HCregaddr, &hc_copy))
+		return;
+	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	phba->link_flag |= LS_IGNORE_ERATT;
+
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return;
+	if (ha_copy & HA_ERATT) {
+		/* Clear Chip error bit */
+		writel(HA_ERATT, phba->HAregaddr);
+		phba->pport->stopped = 1;
+	}
+
+	mbox = 0;
+	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
+	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+
+	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
+	mbox_buf = phba->MBslimaddr;
+	writel(mbox, mbox_buf);
+
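+	/*
+	 * Poll for up to 50ms: the chip acknowledges the kill request by
+	 * writing the one's complement of the test pattern back to SLIM.
+	 */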
+	for (i = 0; i < 50; i++) {
+		if (lpfc_readl((resp_buf + 1), &resp_data))
+			return;
+		if (resp_data != ~(BARRIER_TEST_PATTERN))
+			mdelay(1);
+		else
+			break;
+	}
+	resp_data = 0;
+	if (lpfc_readl((resp_buf + 1), &resp_data))
+		return;
+	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
+		    phba->pport->stopped)
+			goto restore_hc;
+		else
+			goto clear_errat;
+	}
+
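+	/*
+	 * Mark the mailbox word host-owned and wait up to 500ms for SLIM
+	 * to reflect the updated mailbox word.
+	 */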
+	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+	resp_data = 0;
+	for (i = 0; i < 500; i++) {
+		if (lpfc_readl(resp_buf, &resp_data))
+			return;
+		if (resp_data != mbox)
+			mdelay(1);
+		else
+			break;
+	}
+
+clear_errat:
+
+	while (++i < 500) {
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return;
+		if (!(ha_copy & HA_ERATT))
+			mdelay(1);
+		else
+			break;
+	}
+
+	if (readl(phba->HAregaddr) & HA_ERATT) {
+		writel(HA_ERATT, phba->HAregaddr);
+		phba->pport->stopped = 1;
+	}
+
+restore_hc:
+	phba->link_flag &= ~LS_IGNORE_ERATT;
+	writel(hc_copy, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+}
+
+/**
+ * lpfc_sli_brdkill - Issue a kill_board mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a kill_board mailbox command and waits for
+ * the error attention interrupt. This function is called for stopping
+ * the firmware processing. The caller is not required to hold any
+ * locks. This function calls lpfc_hba_down_post function to free
+ * any pending commands after the kill. The function returns 1 when it
+ * fails to kill the board, else it returns 0.
+ **/
+int
+lpfc_sli_brdkill(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	LPFC_MBOXQ_t *pmb;
+	uint32_t status;
+	uint32_t ha_copy;
+	int retval;
+	int i = 0;
+
+	psli = &phba->sli;
+
+	/* Kill HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0329 Kill HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb)
+		return 1;
+
+	/* Disable the error attention */
+	spin_lock_irq(&phba->hbalock);
+	if (lpfc_readl(phba->HCregaddr, &status)) {
+		spin_unlock_irq(&phba->hbalock);
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return 1;
+	}
+	status &= ~HC_ERINT_ENA;
+	writel(status, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	phba->link_flag |= LS_IGNORE_ERATT;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_kill_board(phba, pmb);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if (retval != MBX_SUCCESS) {
+		if (retval != MBX_BUSY)
+			mempool_free(pmb, phba->mbox_mem_pool);
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2752 KILL_BOARD command failed retval %d\n",
+				retval);
+		spin_lock_irq(&phba->hbalock);
+		phba->link_flag &= ~LS_IGNORE_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return 1;
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
+	 * attention every 100ms for 3 seconds. If we don't get ERATT after
+	 * 3 seconds we still set HBA_ERROR state because the status of the
+	 * board is now undefined.
+	 */
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return 1;
+	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
+		mdelay(100);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return 1;
+	}
+
+	del_timer_sync(&psli->mbox_tmo);
+	if (ha_copy & HA_ERATT) {
+		writel(HA_ERATT, phba->HAregaddr);
+		phba->pport->stopped = 1;
+	}
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	psli->mbox_active = NULL;
+	phba->link_flag &= ~LS_IGNORE_ERATT;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_hba_down_post(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+
+	return ha_copy & HA_ERATT ? 0 : 1;
+}
+
+/**
+ * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets the HBA by writing HC_INITFF to the control
+ * register. After the HBA resets, this function resets all the iocb ring
+ * indices. This function disables PCI layer parity checking during
+ * the reset.
+ * This function returns 0 always.
+ * The caller is not required to hold any locks.
+ **/
+int
+lpfc_sli_brdreset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	uint16_t cfg_value;
+	int i;
+
+	psli = &phba->sli;
+
+	/* Reset HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0325 Reset HBA Data: x%x x%x\n",
+			(phba->pport) ? phba->pport->port_state : 0,
+			psli->sli_flag);
+
+	/* perform board reset */
+	phba->fc_eventTag = 0;
+	phba->link_events = 0;
+	if (phba->pport) {
+		phba->pport->fc_myDID = 0;
+		phba->pport->fc_prevDID = 0;
+	}
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND,
+			      (cfg_value &
+			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
+
+	/* Now toggle INITFF bit in the Host Control Register */
+	writel(HC_INITFF, phba->HCregaddr);
+	mdelay(1);
+	readl(phba->HCregaddr); /* flush */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* Restore PCI cmd register */
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
+	/* Initialize relevant SLI info */
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->sli3_ring[i];
+		pring->flag = 0;
+		pring->sli.sli3.rspidx = 0;
+		pring->sli.sli3.next_cmdidx  = 0;
+		pring->sli.sli3.local_getidx = 0;
+		pring->sli.sli3.cmdidx = 0;
+		pring->missbufcnt = 0;
+	}
+
+	phba->link_state = LPFC_WARM_START;
+	return 0;
+}
+
+/**
+ * lpfc_sli4_brdreset - Reset a sli-4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets a SLI4 HBA. This function disables PCI-layer parity
+ * checking while it resets the device. The caller is not required to hold
+ * any locks.
+ *
+ * This function returns 0 always.
+ **/
+int
+lpfc_sli4_brdreset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	uint16_t cfg_value;
+	int rc = 0;
+
+	/* Reset HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0295 Reset HBA Data: x%x x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag,
+			phba->hba_flag);
+
+	/* perform board reset */
+	phba->fc_eventTag = 0;
+	phba->link_events = 0;
+	phba->pport->fc_myDID = 0;
+	phba->pport->fc_prevDID = 0;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~(LPFC_PROCESS_LA);
+	phba->fcf.fcf_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
+	if (phba->hba_flag & HBA_FW_DUMP_OP) {
+		phba->hba_flag &= ~HBA_FW_DUMP_OP;
+		return rc;
+	}
+
+	/* Now physically reset the device */
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0389 Performing PCI function reset!\n");
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
+			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+	/* Perform FCoE PCI function reset before freeing queue memory */
+	rc = lpfc_pci_function_reset(phba);
+
+	/* Restore PCI cmd register */
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
+	return rc;
+}
+
+/**
+ * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to
+ * restart the HBA. The caller is not required to hold any lock.
+ * This function writes MBX_RESTART mailbox command to the SLIM and
+ * resets the HBA. At the end of the function, it calls the
+ * lpfc_hba_down_post function to free any pending commands. The function
+ * enables POST only during the first initialization and returns zero;
+ * it does not guarantee completion of the MBX_RESTART mailbox command
+ * before returning.
+ **/
+static int
+lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
+{
+	MAILBOX_t *mb;
+	struct lpfc_sli *psli;
+	volatile uint32_t word0;
+	void __iomem *to_slim;
+	uint32_t hba_aer_enabled;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Take PCIe device Advanced Error Reporting (AER) state */
+	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
+	psli = &phba->sli;
+
+	/* Restart HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0337 Restart HBA Data: x%x x%x\n",
+			(phba->pport) ? phba->pport->port_state : 0,
+			psli->sli_flag);
+
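+	/* Build an MBX_RESTART mailbox command in a local mailbox word */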
+	word0 = 0;
+	mb = (MAILBOX_t *) &word0;
+	mb->mbxCommand = MBX_RESTART;
+	mb->mbxHc = 1;
+
+	lpfc_reset_barrier(phba);
+
+	to_slim = phba->MBslimaddr;
+	writel(*(uint32_t *) mb, to_slim);
+	readl(to_slim); /* flush */
+
+	/* Only skip post after fc_ffinit is completed */
+	if (phba->pport && phba->pport->port_state)
+		word0 = 1;	/* This is really setting up word1 */
+	else
+		word0 = 0;	/* This is really setting up word1 */
+	to_slim = phba->MBslimaddr + sizeof (uint32_t);
+	writel(*(uint32_t *) mb, to_slim);
+	readl(to_slim); /* flush */
+
+	lpfc_sli_brdreset(phba);
+	if (phba->pport)
+		phba->pport->stopped = 0;
+	phba->link_state = LPFC_INIT_START;
+	phba->hba_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+	psli->stats_start = get_seconds();
+
+	/* Give the INITFF and Post time to settle. */
+	mdelay(100);
+
+	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
+	if (hba_aer_enabled)
+		pci_disable_pcie_error_reporting(phba->pcidev);
+
+	lpfc_hba_down_post(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to restart
+ * a SLI4 HBA. The caller is not required to hold any lock.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static int
+lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t hba_aer_enabled;
+	int rc;
+
+	/* Restart HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0296 Restart HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	/* Take PCIe device Advanced Error Reporting (AER) state */
+	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
+	rc = lpfc_sli4_brdreset(phba);
+	if (rc)
+		return rc;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->pport->stopped = 0;
+	phba->link_state = LPFC_INIT_START;
+	phba->hba_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+	psli->stats_start = get_seconds();
+
+	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
+	if (hba_aer_enabled)
+		pci_disable_pcie_error_reporting(phba->pcidev);
+
+	lpfc_hba_down_post(phba);
+	lpfc_sli4_queue_destroy(phba);
+
+	return rc;
+}
+
+/**
+ * lpfc_sli_brdrestart - Wrapper func for restarting hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 HBA restart routine via the
+ * API jump table function pointer in the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
+{
+	return phba->lpfc_sli_brdrestart(phba);
+}
+
+/**
+ * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called after an HBA restart to wait for successful
+ * restart of the HBA. Successful restart of the HBA is indicated by
+ * the HS_FFRDY and HS_MBRDY bits. If the HBA has not restarted after 150
+ * polling iterations, the function restarts the HBA again. The function
+ * returns zero if the HBA successfully restarted, else it returns a
+ * negative error code.
+ **/
+int
+lpfc_sli_chipset_init(struct lpfc_hba *phba)
+{
+	uint32_t status, i = 0;
+
+	/* Read the HBA Host Status Register */
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return -EIO;
+
+	/* Check status register to see what current state is */
+	i = 0;
+	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
+
+		/* Check every 10ms for 10 retries, then every 100ms for 90
+		 * retries, then every 1 sec for 50 retries, for a total of
+		 * ~60 seconds, before resetting the board again and checking
+		 * every 1 sec for 50 more retries. The up-to-60-second wait
+		 * before the board is ready is required for the Falcon FIPS
+		 * zeroization to complete; any board reset in between would
+		 * restart zeroization and further delay board readiness.
+		 */
+		if (i++ >= 200) {
+			/* Adapter failed to init, timeout, status reg
+			   <status> */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0436 Adapter failed to init, "
+					"timeout, status reg x%x, "
+					"FW Data: A8 x%x AC x%x\n", status,
+					readl(phba->MBslimaddr + 0xa8),
+					readl(phba->MBslimaddr + 0xac));
+			phba->link_state = LPFC_HBA_ERROR;
+			return -ETIMEDOUT;
+		}
+
+		/* Check to see if any errors occurred during init */
+		if (status & HS_FFERM) {
+			/* ERROR: During chipset initialization */
+			/* Adapter failed to init, chipset, status reg
+			   <status> */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0437 Adapter failed to init, "
+					"chipset, status reg x%x, "
+					"FW Data: A8 x%x AC x%x\n", status,
+					readl(phba->MBslimaddr + 0xa8),
+					readl(phba->MBslimaddr + 0xac));
+			phba->link_state = LPFC_HBA_ERROR;
+			return -EIO;
+		}
+
+		if (i <= 10)
+			msleep(10);
+		else if (i <= 100)
+			msleep(100);
+		else
+			msleep(1000);
+
+		if (i == 150) {
+			/* Do post */
+			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+			lpfc_sli_brdrestart(phba);
+		}
+		/* Read the HBA Host Status Register */
+		if (lpfc_readl(phba->HSregaddr, &status))
+			return -EIO;
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status & HS_FFERM) {
+		/* ERROR: During chipset initialization */
+		/* Adapter failed to init, chipset, status reg <status> */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0438 Adapter failed to init, chipset, "
+				"status reg x%x, "
+				"FW Data: A8 x%x AC x%x\n", status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
+		phba->link_state = LPFC_HBA_ERROR;
+		return -EIO;
+	}
+
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* setup host attn register */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	return 0;
+}
+
+/**
+ * lpfc_sli_hbq_count - Get the number of HBQs to be configured
+ *
+ * This function calculates and returns the number of HBQs required to be
+ * configured.
+ **/
+int
+lpfc_sli_hbq_count(void)
+{
+	return ARRAY_SIZE(lpfc_hbq_defs);
+}
+
+/**
+ * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
+ *
+ * This function adds the number of hbq entries in every HBQ to get
+ * the total number of hbq entries required for the HBA and returns
+ * the total count.
+ **/
+static int
+lpfc_sli_hbq_entry_count(void)
+{
+	int  hbq_count = lpfc_sli_hbq_count();
+	int  count = 0;
+	int  i;
+
+	for (i = 0; i < hbq_count; ++i)
+		count += lpfc_hbq_defs[i]->entry_count;
+	return count;
+}
+
+/**
+ * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
+ *
+ * This function calculates amount of memory required for all hbq entries
+ * to be configured and returns the total memory required.
+ **/
+int
+lpfc_sli_hbq_size(void)
+{
+	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
+}
+
+/**
+ * lpfc_sli_hbq_setup - configure and initialize HBQs
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli_hbq_setup(struct lpfc_hba *phba)
+{
+	int  hbq_count = lpfc_sli_hbq_count();
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *pmbox;
+	uint32_t hbqno;
+	uint32_t hbq_entry_index;
+
+	/* Get a Mailbox buffer to setup mailbox
+	 * commands for HBA initialization
+	 */
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmb)
+		return -ENOMEM;
+
+	pmbox = &pmb->u.mb;
+
+	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
+	phba->link_state = LPFC_INIT_MBX_CMDS;
+	phba->hbq_in_use = 1;
+
+	hbq_entry_index = 0;
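+	/*
+	 * Configure each HBQ in turn; all HBQs share one contiguous
+	 * entry index space, so accumulate the per-HBQ entry counts.
+	 */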
+	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+		phba->hbqs[hbqno].next_hbqPutIdx = 0;
+		phba->hbqs[hbqno].hbqPutIdx      = 0;
+		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
+		phba->hbqs[hbqno].entry_count =
+			lpfc_hbq_defs[hbqno]->entry_count;
+		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
+			hbq_entry_index, pmb);
+		hbq_entry_index += phba->hbqs[hbqno].entry_count;
+
+		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
+			   mbxStatus <status>, ring <num> */
+
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_SLI | LOG_VPORT,
+					"1805 Adapter failed to init. "
+					"Data: x%x x%x x%x\n",
+					pmbox->mbxCommand,
+					pmbox->mbxStatus, hbqno);
+
+			phba->link_state = LPFC_HBA_ERROR;
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -ENXIO;
+		}
+	}
+	phba->hbq_count = hbq_count;
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Initially populate or replenish the HBQs */
+	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
+		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
+	return 0;
+}
+
+/**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+	phba->hbq_in_use = 1;
+	phba->hbqs[LPFC_ELS_HBQ].entry_count =
+		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
+	phba->hbq_count = 1;
+	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
+	/* Initially populate or replenish the HBQs */
+	return 0;
+}
+
+/**
+ * lpfc_sli_config_port - Issue config port mailbox command
+ * @phba: Pointer to HBA context object.
+ * @sli_mode: sli mode - 2/3
+ *
+ * This function is called by the sli initialization code path
+ * to issue config_port mailbox command. This function restarts the
+ * HBA firmware and issues a config_port mailbox command to configure
+ * the SLI interface in the sli mode specified by sli_mode
+ * variable. The caller is not required to hold any locks.
+ * The function returns 0 if successful, else returns negative error
+ * code.
+ **/
+int
+lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
+{
+	LPFC_MBOXQ_t *pmb;
+	uint32_t resetcount = 0, rc = 0, done = 0;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	phba->sli_rev = sli_mode;
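+	/* Allow up to two restart/CONFIG_PORT attempts before giving up */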
+	while (resetcount < 2 && !done) {
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		lpfc_sli_brdrestart(phba);
+		rc = lpfc_sli_chipset_init(phba);
+		if (rc)
+			break;
+
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+		resetcount++;
+
+		/* Call pre CONFIG_PORT mailbox command initialization.  A
+		 * value of 0 means the call was successful.  Any other
+		 * nonzero value is a failure, but if ERESTART is returned,
+		 * the driver may reset the HBA and try again.
+		 */
+		rc = lpfc_config_port_prep(phba);
+		if (rc == -ERESTART) {
+			phba->link_state = LPFC_LINK_UNKNOWN;
+			continue;
+		} else if (rc)
+			break;
+
+		phba->link_state = LPFC_INIT_MBX_CMDS;
+		lpfc_config_port(phba, pmb);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
+					LPFC_SLI3_HBQ_ENABLED |
+					LPFC_SLI3_CRP_ENABLED |
+					LPFC_SLI3_BG_ENABLED |
+					LPFC_SLI3_DSS_ENABLED);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0442 Adapter failed to init, mbxCmd x%x "
+				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
+			spin_lock_irq(&phba->hbalock);
+			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+			spin_unlock_irq(&phba->hbalock);
+			rc = -ENXIO;
+		} else {
+			/* Allow asynchronous mailbox command to go through */
+			spin_lock_irq(&phba->hbalock);
+			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+			spin_unlock_irq(&phba->hbalock);
+			done = 1;
+
+			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
+				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"3110 Port did not grant ASABT\n");
+		}
+	}
+	if (!done) {
+		rc = -EINVAL;
+		goto do_prep_failed;
+	}
+	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+		if (!pmb->u.mb.un.varCfgPort.cMA) {
+			rc = -ENXIO;
+			goto do_prep_failed;
+		}
+		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
+			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+				phba->max_vpi : phba->max_vports;
+
+		} else
+			phba->max_vpi = 0;
+		phba->fips_level = 0;
+		phba->fips_spec_rev = 0;
+		if (pmb->u.mb.un.varCfgPort.gdss) {
+			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
+			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2850 Security Crypto Active. FIPS x%d "
+					"(Spec Rev: x%d)",
+					phba->fips_level, phba->fips_spec_rev);
+		}
+		if (pmb->u.mb.un.varCfgPort.sec_err) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2856 Config Port Security Crypto "
+					"Error: x%x ",
+					pmb->u.mb.un.varCfgPort.sec_err);
+		}
+		if (pmb->u.mb.un.varCfgPort.gerbm)
+			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+		if (pmb->u.mb.un.varCfgPort.gcrp)
+			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
+
+		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+		phba->port_gp = phba->mbox->us.s3_pgp.port;
+
+		if (phba->cfg_enable_bg) {
+			if (pmb->u.mb.un.varCfgPort.gbg)
+				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+			else
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0443 Adapter did not grant "
+						"BlockGuard\n");
+		}
+	} else {
+		phba->hbq_get = NULL;
+		phba->port_gp = phba->mbox->us.s2.port;
+		phba->max_vpi = 0;
+	}
+do_prep_failed:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+}
+
+
+/**
+ * lpfc_sli_hba_setup - SLI initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI initialization function. This function
+ * is called by the HBA initialization code, HBA reset code and HBA
+ * error attention handler code. Caller is not required to hold any
+ * locks. This function issues config_port mailbox command to configure
+ * the SLI, setup iocb rings and HBQ rings. In the end the function
+ * calls the config_port_post function to issue init_link mailbox
+ * command and to start the discovery. The function will return zero
+ * if successful, else it will return negative error code.
+ **/
+int
+lpfc_sli_hba_setup(struct lpfc_hba *phba)
+{
+	uint32_t rc;
+	int  mode = 3, i;
+	int longs;
+
+	switch (phba->cfg_sli_mode) {
+	case 2:
+		if (phba->cfg_enable_npiv) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+				"1824 NPIV enabled: Override sli_mode "
+				"parameter (%d) to auto (0).\n",
+				phba->cfg_sli_mode);
+			break;
+		}
+		mode = 2;
+		break;
+	case 0:
+	case 3:
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+				"1819 Unrecognized sli_mode parameter: %d.\n",
+				phba->cfg_sli_mode);
+
+		break;
+	}
+	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
+
+	rc = lpfc_sli_config_port(phba, mode);
+
+	if (rc && phba->cfg_sli_mode == 3)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+				"1820 Unable to select SLI-3.  "
+				"Not supported by adapter.\n");
+	if (rc && mode != 2)
+		rc = lpfc_sli_config_port(phba, 2);
+	else if (rc && mode == 2)
+		rc = lpfc_sli_config_port(phba, 3);
+	if (rc)
+		goto lpfc_sli_hba_setup_error;
+
+	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
+	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+		rc = pci_enable_pcie_error_reporting(phba->pcidev);
+		if (!rc) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2709 This device supports "
+					"Advanced Error Reporting (AER)\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag |= HBA_AER_ENABLED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2708 This device does not support "
+					"Advanced Error Reporting (AER): %d\n",
+					rc);
+			phba->cfg_aer_support = 0;
+		}
+	}
+
+	if (phba->sli_rev == 3) {
+		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
+		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
+	} else {
+		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
+		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
+		phba->sli3_options = 0;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
+			phba->sli_rev, phba->max_vpi);
+	rc = lpfc_sli_ring_map(phba);
+
+	if (rc)
+		goto lpfc_sli_hba_setup_error;
+
+	/* Initialize VPIs. */
+	if (phba->sli_rev == LPFC_SLI_REV3) {
+		/*
+		 * The VPI bitmask and physical ID array are allocated
+		 * and initialized once only - at driver load.  A port
+		 * reset doesn't need to reinitialize this memory.
+		 */
+		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
+			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+						  GFP_KERNEL);
+			if (!phba->vpi_bmask) {
+				rc = -ENOMEM;
+				goto lpfc_sli_hba_setup_error;
+			}
+
+			phba->vpi_ids = kzalloc(
+					(phba->max_vpi+1) * sizeof(uint16_t),
+					GFP_KERNEL);
+			if (!phba->vpi_ids) {
+				kfree(phba->vpi_bmask);
+				rc = -ENOMEM;
+				goto lpfc_sli_hba_setup_error;
+			}
+			for (i = 0; i < phba->max_vpi; i++)
+				phba->vpi_ids[i] = i;
+		}
+	}
+
+	/* Init HBQs */
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		rc = lpfc_sli_hbq_setup(phba);
+		if (rc)
+			goto lpfc_sli_hba_setup_error;
+	}
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_PROCESS_LA;
+	spin_unlock_irq(&phba->hbalock);
+
+	rc = lpfc_config_port_post(phba);
+	if (rc)
+		goto lpfc_sli_hba_setup_error;
+
+	return rc;
+
+lpfc_sli_hba_setup_error:
+	phba->link_state = LPFC_HBA_ERROR;
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0445 Firmware initialization failed\n");
+	return rc;
+}
+
+/**
+ * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a dump mailbox command to read config region
+ * 23, parses the records in the region, and populates the driver
+ * data structure.
+ **/
+static int
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_mqe *mqe;
+	uint32_t data_length;
+	int rc;
+
+	/* Program the default value of vlan_id and fc_map */
+	phba->valid_vlan = 0;
+	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	mqe = &mboxq->u.mqe;
+	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+		rc = -ENOMEM;
+		goto out_free_mboxq;
+	}
+
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):2571 Mailbox cmd x%x Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			bf_get(lpfc_mqe_command, mqe),
+			bf_get(lpfc_mqe_status, mqe),
+			mqe->un.mb_words[0], mqe->un.mb_words[1],
+			mqe->un.mb_words[2], mqe->un.mb_words[3],
+			mqe->un.mb_words[4], mqe->un.mb_words[5],
+			mqe->un.mb_words[6], mqe->un.mb_words[7],
+			mqe->un.mb_words[8], mqe->un.mb_words[9],
+			mqe->un.mb_words[10], mqe->un.mb_words[11],
+			mqe->un.mb_words[12], mqe->un.mb_words[13],
+			mqe->un.mb_words[14], mqe->un.mb_words[15],
+			mqe->un.mb_words[16], mqe->un.mb_words[50],
+			mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
+
+	if (rc) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		rc = -EIO;
+		goto out_free_mboxq;
+	}
+	data_length = mqe->un.mb_words[5];
+	if (data_length > DMP_RGN23_SIZE) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		rc = -EIO;
+		goto out_free_mboxq;
+	}
+
+	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	rc = 0;
+
+out_free_mboxq:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the LPFC_MBOXQ_t structure.
+ * @vpd: pointer to the memory to hold resulting port vpd data.
+ * @vpd_size: On input, the number of bytes allocated to @vpd.
+ *	      On output, the number of data bytes in @vpd.
+ *
+ * This routine executes a READ_REV SLI4 mailbox command.  In
+ * addition, this routine gets the port vpd data.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+		    uint8_t *vpd, uint32_t *vpd_size)
+{
+	int rc = 0;
+	uint32_t dma_size;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_mqe *mqe;
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	/*
+	 * Get a DMA buffer for the vpd data resulting from the READ_REV
+	 * mailbox command.
+	 */
+	dma_size = *vpd_size;
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
+					   &dmabuf->phys, GFP_KERNEL);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
+	}
+
+	/*
+	 * The SLI4 implementation of READ_REV conflicts at word1,
+	 * bits 31:16 and SLI4 adds vpd functionality not present
+	 * in SLI3.  This code corrects the conflicts.
+	 */
+	lpfc_read_rev(phba, mboxq);
+	mqe = &mboxq->u.mqe;
+	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
+	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
+	mqe->un.read_rev.word1 &= 0x0000FFFF;
+	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
+	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc) {
+		dma_free_coherent(&phba->pcidev->dev, dma_size,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+		return -EIO;
+	}
+
+	/*
+	 * The available vpd length cannot be bigger than the
+	 * DMA buffer passed to the port.  Catch the less than
+	 * case and update the caller's size.
+	 */
+	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
+		*vpd_size = mqe->un.read_rev.avail_vpd_len;
+
+	memcpy(vpd, dmabuf->virt, *vpd_size);
+
+	dma_free_coherent(&phba->pcidev->dev, dma_size,
+			  dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return 0;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves the SLI4 device physical port name that this PCI
+ * function is attached to.
+ *
+ * Return codes
+ *      0 - successful
+ *      otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
+	struct lpfc_controller_attribute *cntl_attr;
+	struct lpfc_mbx_get_port_name *get_port_name;
+	void *virtaddr = NULL;
+	uint32_t alloclen, reqlen;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	char cport_name = 0;
+	int rc;
+
+	/* We assume nothing at this point */
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	/* obtain link type and link number via READ_CONFIG */
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+	lpfc_sli4_read_config(phba);
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+		goto retrieve_ppname;
+
+	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
+	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
+			LPFC_SLI4_MBX_NEMBED);
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3084 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory size "
+				"(%d)\n", alloclen, reqlen);
+		rc = -ENOMEM;
+		goto out_free_mboxq;
+	}
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	virtaddr = mboxq->sge_array->addr[0];
+	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
+	shdr = &mbx_cntl_attr->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3085 Mailbox x%x (x%x/x%x) failed, "
+				"rc:x%x, status:x%x, add_status:x%x\n",
+				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				rc, shdr_status, shdr_add_status);
+		rc = -ENXIO;
+		goto out_free_mboxq;
+	}
+	cntl_attr = &mbx_cntl_attr->cntl_attr;
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+	phba->sli4_hba.lnk_info.lnk_tp =
+		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
+	phba->sli4_hba.lnk_info.lnk_no =
+		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3086 lnk_type:%d, lnk_numb:%d\n",
+			phba->sli4_hba.lnk_info.lnk_tp,
+			phba->sli4_hba.lnk_info.lnk_no);
+
+retrieve_ppname:
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+		LPFC_MBOX_OPCODE_GET_PORT_NAME,
+		sizeof(struct lpfc_mbx_get_port_name) -
+		sizeof(struct lpfc_sli4_cfg_mhdr),
+		LPFC_SLI4_MBX_EMBED);
+	get_port_name = &mboxq->u.mqe.un.get_port_name;
+	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
+	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
+		phba->sli4_hba.lnk_info.lnk_tp);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3087 Mailbox x%x (x%x/x%x) failed: "
+				"rc:x%x, status:x%x, add_status:x%x\n",
+				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				rc, shdr_status, shdr_add_status);
+		rc = -ENXIO;
+		goto out_free_mboxq;
+	}
+	switch (phba->sli4_hba.lnk_info.lnk_no) {
+	case LPFC_LINK_NUMBER_0:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	case LPFC_LINK_NUMBER_1:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	case LPFC_LINK_NUMBER_2:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	case LPFC_LINK_NUMBER_3:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	default:
+		break;
+	}
+
+	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
+		phba->Port[0] = cport_name;
+		phba->Port[1] = '\0';
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3091 SLI get port name: %s\n", phba->Port);
+	}
+
+out_free_mboxq:
+	if (rc != MBX_TIMEOUT) {
+		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		else
+			mempool_free(mboxq, phba->mbox_mem_pool);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to explicitly arm the SLI4 device's completion and
+ * event queues.
+ **/
+static void
+lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
+{
+	int qidx;
+
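+	/* Rearm the slow-path mailbox and ELS completion queues first */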
+	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
+	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
+	if (phba->sli4_hba.nvmels_cq)
+		lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
+						LPFC_QUEUE_REARM);
+
+	if (phba->sli4_hba.fcp_cq)
+		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
+			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
+						LPFC_QUEUE_REARM);
+
+	if (phba->sli4_hba.nvme_cq)
+		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
+			lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
+						LPFC_QUEUE_REARM);
+
+	if (phba->cfg_fof)
+		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
+
+	if (phba->sli4_hba.hba_eq)
+		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
+			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
+						LPFC_QUEUE_REARM);
+
+	if (phba->nvmet_support) {
+		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
+			lpfc_sli4_cq_release(
+				phba->sli4_hba.nvmet_cqset[qidx],
+				LPFC_QUEUE_REARM);
+		}
+	}
+
+	if (phba->cfg_fof)
+		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold port available extent count.
+ * @extnt_size: buffer to hold element count per extent.
+ *
+ * This function queries the port and retrieves the number of available
+ * extents and their size for a particular extent type.
+ *
+ * Returns: 0 if successful.  Nonzero otherwise.
+ **/
+int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+			       uint16_t *extnt_count, uint16_t *extnt_size)
+{
+	int rc = 0;
+	uint32_t length;
+	uint32_t mbox_tmo;
+	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+	LPFC_MBOXQ_t *mbox;
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/* Find out how many extents are available for this resource type */
+	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	/* Send an extents count of 0 - the GET doesn't use it. */
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+					LPFC_SLI4_MBX_EMBED);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+	if (bf_get(lpfc_mbox_hdr_status,
+		   &rsrc_info->header.cfg_shdr.response)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+				"2930 Failed to get resource extents "
+				"Status 0x%x Add'l Status 0x%x\n",
+				bf_get(lpfc_mbox_hdr_status,
+				       &rsrc_info->header.cfg_shdr.response),
+				bf_get(lpfc_mbox_hdr_add_status,
+				       &rsrc_info->header.cfg_shdr.response));
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+			      &rsrc_info->u.rsp);
+	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+			     &rsrc_info->u.rsp);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3162 Retrieved extents type-%d from port: count:%d, "
+			"size:%d\n", type, *extnt_count, *extnt_size);
+
+err_exit:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return rc;
+}
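+
+/*
+ * Usage sketch (illustrative only; the local variable names are
+ * hypothetical): probe the port for one extent type and derive the
+ * total number of ids it can provide:
+ *
+ *	uint16_t ext_cnt, ext_size;
+ *
+ *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ *					    &ext_cnt, &ext_size);
+ *	if (!rc)
+ *		total_ids = ext_cnt * ext_size;
+ */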
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine post port reset to understand if there is an
+ * extent reprovisioning requirement.
+ *
+ * Returns:
+ *   -Error: error indicates problem.
+ *   1: Extent count or size has changed.
+ *   0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+	uint16_t curr_ext_cnt, rsrc_ext_cnt;
+	uint16_t size_diff, rsrc_ext_size;
+	int rc = 0;
+	struct lpfc_rsrc_blks *rsrc_entry;
+	struct list_head *rsrc_blk_list = NULL;
+
+	size_diff = 0;
+	curr_ext_cnt = 0;
+	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+					    &rsrc_ext_cnt,
+					    &rsrc_ext_size);
+	if (unlikely(rc))
+		return -EIO;
+
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+		break;
+	default:
+		break;
+	}
+
+	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+		curr_ext_cnt++;
+		if (rsrc_entry->rsrc_size != rsrc_ext_size)
+			size_diff++;
+	}
+
+	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+		rc = 1;
+
+	return rc;
+}
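+
+/*
+ * Caller-side sketch (hypothetical; the caller in
+ * lpfc_sli4_alloc_resource_identifiers below simply counts any nonzero
+ * return as a change): the return value is a tri-state:
+ *
+ *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ *	if (rc < 0)
+ *		return rc;	(mailbox/port failure)
+ *	if (rc)
+ *		error++;	(extent count or size changed)
+ */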
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request.  It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ *   -Error:  Error value describes the condition found.
+ *   0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
+			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+	int rc = 0;
+	uint32_t req_len;
+	uint32_t emb_len;
+	uint32_t alloc_len, mbox_tmo;
+
+	/* Calculate the total requested length of the dma memory */
+	req_len = extnt_cnt * sizeof(uint16_t);
+
+	/*
+	 * Calculate the size of an embedded mailbox.  The uint32_t
+	 * accounts for extents-specific word.
+	 */
+	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+		sizeof(uint32_t);
+
+	/*
+	 * Presume the allocation and response will fit into an embedded
+	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
+	 */
+	*emb = LPFC_SLI4_MBX_EMBED;
+	if (req_len > emb_len) {
+		req_len = extnt_cnt * sizeof(uint16_t) +
+			sizeof(union lpfc_sli4_cfg_shdr) +
+			sizeof(uint32_t);
+		*emb = LPFC_SLI4_MBX_NEMBED;
+	}
+
+	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+				     req_len, *emb);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2982 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		return -ENOMEM;
+	}
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
+	if (unlikely(rc))
+		return -EIO;
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+
+	if (unlikely(rc))
+		rc = -EIO;
+	return rc;
+}
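+
+/*
+ * Sizing note (restating the logic above): the embedded form is used
+ * while the extent-id payload fits in a single mailbox, i.e. while
+ *
+ *	extnt_cnt * sizeof(uint16_t) <=
+ *		sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ *		sizeof(uint32_t)
+ *
+ * Otherwise the request is reissued non-embedded, with the config shdr
+ * and the extents-specific word added to the requested length.
+ */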
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type:  The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+	bool emb = false;
+	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+	uint16_t rsrc_id, rsrc_start, j, k;
+	uint16_t *ids;
+	int i, rc;
+	unsigned long longs;
+	unsigned long *bmask;
+	struct lpfc_rsrc_blks *rsrc_blks;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t length;
+	struct lpfc_id_range *id_array = NULL;
+	void *virtaddr = NULL;
+	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+	struct list_head *ext_blk_list;
+
+	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+					    &rsrc_cnt,
+					    &rsrc_size);
+	if (unlikely(rc))
+		return -EIO;
+
+	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+			"3009 No available Resource Extents "
+			"for resource type 0x%x: Count: 0x%x, "
+			"Size 0x%x\n", type, rsrc_cnt,
+			rsrc_size);
+		return -ENOMEM;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
+			"2903 Post resource extents type-0x%x: "
+			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	/*
+	 * Figure out where the response is located.  Then get local pointers
+	 * to the response data.  The port does not guarantee to respond to
+	 * all extent count requests, so update the local variable with the
+	 * allocated count from the port.
+	 */
+	if (emb == LPFC_SLI4_MBX_EMBED) {
+		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+		id_array = &rsrc_ext->u.rsp.id[0];
+		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+	} else {
+		virtaddr = mbox->sge_array->addr[0];
+		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+		id_array = &n_rsrc->id;
+	}
+
+	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+	rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+	/*
+	 * Based on the resource size and count, correct the base and max
+	 * resource values.
+	 */
+	length = sizeof(struct lpfc_rsrc_blks);
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		phba->sli4_hba.rpi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_ids)) {
+			kfree(phba->sli4_hba.rpi_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/*
+		 * The next_rpi was initialized with the maximum available
+		 * count but the port may allocate a smaller number.  Catch
+		 * that case and update the next_rpi.
+		 */
+		phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->sli4_hba.rpi_bmask;
+		ids = phba->sli4_hba.rpi_ids;
+		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		phba->vpi_bmask = kzalloc(longs *
+					  sizeof(unsigned long),
+					  GFP_KERNEL);
+		if (unlikely(!phba->vpi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->vpi_ids = kzalloc(rsrc_id_cnt *
+					 sizeof(uint16_t),
+					 GFP_KERNEL);
+		if (unlikely(!phba->vpi_ids)) {
+			kfree(phba->vpi_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->vpi_bmask;
+		ids = phba->vpi_ids;
+		ext_blk_list = &phba->lpfc_vpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		phba->sli4_hba.xri_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.max_cfg_param.xri_used = 0;
+		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_ids)) {
+			kfree(phba->sli4_hba.xri_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->sli4_hba.xri_bmask;
+		ids = phba->sli4_hba.xri_ids;
+		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		phba->sli4_hba.vfi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_ids)) {
+			kfree(phba->sli4_hba.vfi_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->sli4_hba.vfi_bmask;
+		ids = phba->sli4_hba.vfi_ids;
+		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+		break;
+	default:
+		/* Unsupported Opcode.  Fail call. */
+		id_array = NULL;
+		bmask = NULL;
+		ids = NULL;
+		ext_blk_list = NULL;
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	/*
+	 * Complete initializing the extent configuration with the
+	 * allocated ids assigned to this function.  The bitmask serves
+	 * as an index into the array and manages the available ids.  The
+	 * array just stores the ids communicated to the port via the wqes.
+	 */
+	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+		if ((i % 2) == 0)
+			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+					 &id_array[k]);
+		else
+			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+					 &id_array[k]);
+
+		rsrc_blks = kzalloc(length, GFP_KERNEL);
+		if (unlikely(!rsrc_blks)) {
+			rc = -ENOMEM;
+			kfree(bmask);
+			kfree(ids);
+			goto err_exit;
+		}
+		rsrc_blks->rsrc_start = rsrc_id;
+		rsrc_blks->rsrc_size = rsrc_size;
+		list_add_tail(&rsrc_blks->list, ext_blk_list);
+		rsrc_start = rsrc_id;
+		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
+			phba->sli4_hba.scsi_xri_start = rsrc_start +
+				lpfc_sli4_get_iocb_cnt(phba);
+			phba->sli4_hba.nvme_xri_start =
+				phba->sli4_hba.scsi_xri_start +
+				phba->sli4_hba.scsi_xri_max;
+		}
+
+		while (rsrc_id < (rsrc_start + rsrc_size)) {
+			ids[j] = rsrc_id;
+			rsrc_id++;
+			j++;
+		}
+		/* Entire word processed.  Get next word.*/
+		if ((i % 2) == 1)
+			k++;
+	}
+ err_exit:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	return rc;
+}
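+
+/*
+ * Illustrative sketch (hypothetical helper, not driver code): once the
+ * routine above has filled bmask and ids, handing out a single id is a
+ * bitmask search plus an id-array lookup:
+ *
+ *	idx = find_first_zero_bit(bmask, id_count);
+ *	if (idx < id_count) {
+ *		set_bit(idx, bmask);
+ *		id = ids[idx];
+ *	}
+ */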
+
+
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range.  It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+	int rc;
+	uint32_t length, mbox_tmo = 0;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/*
+	 * This function sends an embedded mailbox because it only sends
+	 * the resource type.  All extents of this type are released by the
+	 * port.
+	 */
+	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	/* Send an extents count of 0 - the dealloc doesn't use it. */
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+					LPFC_SLI4_MBX_EMBED);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+	if (bf_get(lpfc_mbox_hdr_status,
+		   &dealloc_rsrc->header.cfg_shdr.response)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+				"2919 Failed to release resource extents "
+				"for type %d - Status 0x%x Add'l Status 0x%x. "
+				"Resource memory not released.\n",
+				type,
+				bf_get(lpfc_mbox_hdr_status,
+				    &dealloc_rsrc->header.cfg_shdr.response),
+				bf_get(lpfc_mbox_hdr_add_status,
+				    &dealloc_rsrc->header.cfg_shdr.response));
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	/* Release kernel memory resources for the specific type. */
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		kfree(phba->vpi_bmask);
+		kfree(phba->vpi_ids);
+		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->lpfc_vpi_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		phba->sli4_hba.max_cfg_param.vpi_used = 0;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		kfree(phba->sli4_hba.xri_bmask);
+		kfree(phba->sli4_hba.xri_ids);
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		kfree(phba->sli4_hba.vfi_bmask);
+		kfree(phba->sli4_hba.vfi_ids);
+		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		/* RPI bitmask and physical id array are cleaned up earlier. */
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	default:
+		break;
+	}
+
+	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return rc;
+}
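+
+/*
+ * Usage sketch (mirrors lpfc_sli4_dealloc_resource_identifiers below):
+ * because the port releases every extent of a type in one command,
+ * teardown is one call per resource type:
+ *
+ *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+ *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+ *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+ *	lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+ */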
+
+static void
+lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+		  uint32_t feature)
+{
+	uint32_t len;
+
+	len = sizeof(struct lpfc_mbx_set_feature) -
+		sizeof(struct lpfc_sli4_cfg_mhdr);
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
+			 LPFC_SLI4_MBX_EMBED);
+
+	switch (feature) {
+	case LPFC_SET_UE_RECOVERY:
+		bf_set(lpfc_mbx_set_feature_UER,
+		       &mbox->u.mqe.un.set_feature, 1);
+		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
+		mbox->u.mqe.un.set_feature.param_len = 8;
+		break;
+	case LPFC_SET_MDS_DIAGS:
+		bf_set(lpfc_mbx_set_feature_mds,
+		       &mbox->u.mqe.un.set_feature, 1);
+		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
+		       &mbox->u.mqe.un.set_feature, 1);
+		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
+		mbox->u.mqe.un.set_feature.param_len = 8;
+		break;
+	}
+
+	return;
+}
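+
+/*
+ * Example (as used in lpfc_sli4_hba_setup below): this helper only
+ * builds the mailbox; the caller still issues it and acts on success:
+ *
+ *	lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ *	if (rc == MBX_SUCCESS)
+ *		phba->hba_flag |= HBA_RECOVERABLE_UE;
+ */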
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+	int i, rc, error = 0;
+	uint16_t count, base;
+	unsigned long longs;
+
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	if (phba->sli4_hba.extents_in_use) {
+		/*
+		 * The port supports resource extents. The XRI, VPI, VFI, RPI
+		 * resource extent count must be read and allocated before
+		 * provisioning the resource id arrays.
+		 */
+		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+		    LPFC_IDX_RSRC_RDY) {
+			/*
+			 * Extent-based resources are set - the driver could
+			 * be in a port reset. Figure out if any corrective
+			 * actions need to be taken.
+			 */
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_VFI);
+			if (rc != 0)
+				error++;
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_VPI);
+			if (rc != 0)
+				error++;
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_XRI);
+			if (rc != 0)
+				error++;
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_RPI);
+			if (rc != 0)
+				error++;
+
+			/*
+			 * It's possible that the number of resources
+			 * provided to this port instance changed between
+			 * resets.  Detect this condition and reallocate
+			 * resources.  Otherwise, there is no action.
+			 */
+			if (error) {
+				lpfc_printf_log(phba, KERN_INFO,
+						LOG_MBOX | LOG_INIT,
+						"2931 Detected extent resource "
+						"change.  Reallocating all "
+						"extents.\n");
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_VFI);
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_VPI);
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_XRI);
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_RPI);
+			} else
+				return 0;
+		}
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+		if (unlikely(rc))
+			goto err_exit;
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+		if (unlikely(rc))
+			goto err_exit;
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+		if (unlikely(rc))
+			goto err_exit;
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+		if (unlikely(rc))
+			goto err_exit;
+		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+		       LPFC_IDX_RSRC_RDY);
+		return rc;
+	} else {
+		/*
+		 * The port does not support resource extents.  The XRI, VPI,
+		 * VFI, RPI resource ids were determined from READ_CONFIG.
+		 * Just allocate the bitmasks and provision the resource id
+		 * arrays.  If a port reset is active, the resources don't
+		 * need any action - just exit.
+		 */
+		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+		    LPFC_IDX_RSRC_RDY) {
+			lpfc_sli4_dealloc_resource_identifiers(phba);
+			lpfc_sli4_remove_rpis(phba);
+		}
+		/* RPIs. */
+		count = phba->sli4_hba.max_cfg_param.max_rpi;
+		if (count <= 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"3279 Invalid provisioning of "
+					"rpi:%d\n", count);
+			rc = -EINVAL;
+			goto err_exit;
+		}
+		base = phba->sli4_hba.max_cfg_param.rpi_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->sli4_hba.rpi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.rpi_ids = kzalloc(count *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_ids)) {
+			rc = -ENOMEM;
+			goto free_rpi_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->sli4_hba.rpi_ids[i] = base + i;
+
+		/* VPIs. */
+		count = phba->sli4_hba.max_cfg_param.max_vpi;
+		if (count <= 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"3280 Invalid provisioning of "
+					"vpi:%d\n", count);
+			rc = -EINVAL;
+			goto free_rpi_ids;
+		}
+		base = phba->sli4_hba.max_cfg_param.vpi_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->vpi_bmask = kzalloc(longs *
+					  sizeof(unsigned long),
+					  GFP_KERNEL);
+		if (unlikely(!phba->vpi_bmask)) {
+			rc = -ENOMEM;
+			goto free_rpi_ids;
+		}
+		phba->vpi_ids = kzalloc(count *
+					sizeof(uint16_t),
+					GFP_KERNEL);
+		if (unlikely(!phba->vpi_ids)) {
+			rc = -ENOMEM;
+			goto free_vpi_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->vpi_ids[i] = base + i;
+
+		/* XRIs. */
+		count = phba->sli4_hba.max_cfg_param.max_xri;
+		if (count <= 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"3281 Invalid provisioning of "
+					"xri:%d\n", count);
+			rc = -EINVAL;
+			goto free_vpi_ids;
+		}
+		base = phba->sli4_hba.max_cfg_param.xri_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->sli4_hba.xri_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_bmask)) {
+			rc = -ENOMEM;
+			goto free_vpi_ids;
+		}
+		phba->sli4_hba.max_cfg_param.xri_used = 0;
+		phba->sli4_hba.xri_ids = kzalloc(count *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_ids)) {
+			rc = -ENOMEM;
+			goto free_xri_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->sli4_hba.xri_ids[i] = base + i;
+
+		/* VFIs. */
+		count = phba->sli4_hba.max_cfg_param.max_vfi;
+		if (count <= 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"3282 Invalid provisioning of "
+					"vfi:%d\n", count);
+			rc = -EINVAL;
+			goto free_xri_ids;
+		}
+		base = phba->sli4_hba.max_cfg_param.vfi_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->sli4_hba.vfi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+			rc = -ENOMEM;
+			goto free_xri_ids;
+		}
+		phba->sli4_hba.vfi_ids = kzalloc(count *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_ids)) {
+			rc = -ENOMEM;
+			goto free_vfi_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->sli4_hba.vfi_ids[i] = base + i;
+
+		/*
+		 * Mark all resources ready.  An HBA reset doesn't need
+		 * to reset the initialization.
+		 */
+		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+		       LPFC_IDX_RSRC_RDY);
+		return 0;
+	}
+
+ free_vfi_bmask:
+	kfree(phba->sli4_hba.vfi_bmask);
+	phba->sli4_hba.vfi_bmask = NULL;
+ free_xri_ids:
+	kfree(phba->sli4_hba.xri_ids);
+	phba->sli4_hba.xri_ids = NULL;
+ free_xri_bmask:
+	kfree(phba->sli4_hba.xri_bmask);
+	phba->sli4_hba.xri_bmask = NULL;
+ free_vpi_ids:
+	kfree(phba->vpi_ids);
+	phba->vpi_ids = NULL;
+ free_vpi_bmask:
+	kfree(phba->vpi_bmask);
+	phba->vpi_bmask = NULL;
+ free_rpi_ids:
+	kfree(phba->sli4_hba.rpi_ids);
+	phba->sli4_hba.rpi_ids = NULL;
+ free_rpi_bmask:
+	kfree(phba->sli4_hba.rpi_bmask);
+	phba->sli4_hba.rpi_bmask = NULL;
+ err_exit:
+	return rc;
+}
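+
+/*
+ * Note on the bitmask sizing used repeatedly above (worked example):
+ *
+ *	longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ *
+ * rounds up to whole unsigned longs; e.g. count = 70 on a 64-bit build
+ * gives longs = 2, i.e. 128 bits of bitmap covering 70 ids.
+ */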
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function deallocates all SLI4 resource identifiers previously
+ * allocated by the driver.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+	if (phba->sli4_hba.extents_in_use) {
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+	} else {
+		kfree(phba->vpi_bmask);
+		phba->sli4_hba.max_cfg_param.vpi_used = 0;
+		kfree(phba->vpi_ids);
+		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		kfree(phba->sli4_hba.xri_bmask);
+		kfree(phba->sli4_hba.xri_ids);
+		kfree(phba->sli4_hba.vfi_bmask);
+		kfree(phba->sli4_hba.vfi_ids);
+		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_cnt: buffer to hold port extent count response.
+ * @extnt_size: buffer to hold port extent size response.
+ *
+ * This function calls the port to read the host allocated extents
+ * for a particular type.
+ **/
+int
+lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
+			       uint16_t *extnt_cnt, uint16_t *extnt_size)
+{
+	bool emb;
+	int rc = 0;
+	uint16_t curr_blks = 0;
+	uint32_t req_len, emb_len;
+	uint32_t alloc_len, mbox_tmo;
+	struct list_head *blk_list_head;
+	struct lpfc_rsrc_blks *rsrc_blk;
+	LPFC_MBOXQ_t *mbox;
+	void *virtaddr = NULL;
+	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+	union  lpfc_sli4_cfg_shdr *shdr;
+
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		blk_list_head = &phba->lpfc_vpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
+		break;
+	default:
+		return -EIO;
+	}
+
+	/* Count the number of extents currently allocated for this type. */
+	list_for_each_entry(rsrc_blk, blk_list_head, list) {
+		if (curr_blks == 0) {
+			/*
+			 * The GET_ALLOCATED mailbox does not return the size,
+			 * just the count.  The size is taken from the first
+			 * allocated block; all blocks of an extent type share
+			 * the same size, so set the return value now.
+			 */
+			*extnt_size = rsrc_blk->rsrc_size;
+		}
+		curr_blks++;
+	}
+
+	/*
+	 * Calculate the size of an embedded mailbox.  The uint32_t
+	 * accounts for extents-specific word.
+	 */
+	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+		sizeof(uint32_t);
+
+	/*
+	 * Presume the allocation and response will fit into an embedded
+	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
+	 */
+	emb = LPFC_SLI4_MBX_EMBED;
+	/* Total length of the allocated-extent-id payload */
+	req_len = curr_blks * sizeof(uint16_t);
+	if (req_len > emb_len) {
+		req_len = curr_blks * sizeof(uint16_t) +
+			sizeof(union lpfc_sli4_cfg_shdr) +
+			sizeof(uint32_t);
+		emb = LPFC_SLI4_MBX_NEMBED;
+	}
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
+
+	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
+				     req_len, emb);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2983 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		rc = -ENOMEM;
+		goto err_exit;
+	}
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	/*
+	 * Figure out where the response is located.  Then get local pointers
+	 * to the response data.  The port does not guarantee to respond to
+	 * all extent count requests, so update the local variable with the
+	 * allocated count from the port.
+	 */
+	if (emb == LPFC_SLI4_MBX_EMBED) {
+		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+		shdr = &rsrc_ext->header.cfg_shdr;
+		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+	} else {
+		virtaddr = mbox->sge_array->addr[0];
+		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+		shdr = &n_rsrc->cfg_shdr;
+		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+	}
+
+	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+			"2984 Failed to read allocated resources "
+			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
+			type,
+			bf_get(lpfc_mbox_hdr_status, &shdr->response),
+			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
+		rc = -EIO;
+		goto err_exit;
+	}
+ err_exit:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
+ * @phba: pointer to lpfc hba data structure.
+ * @sgl_list: linked list of sgl buffers to post
+ * @cnt: number of linked list buffers
+ *
+ * This routine walks the list of buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. It attempts to construct blocks
+ * of buffer sgls which contain contiguous xris and uses the non-embedded
+ * SGL block post mailbox commands to post them to the port. For a single
+ * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
+ * mailbox command for posting.
+ *
+ * Returns: the number of sgls actually posted on success, -EIO on failure.
+ **/
+static int
+lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+			  struct list_head *sgl_list, int cnt)
+{
+	struct lpfc_sglq *sglq_entry = NULL;
+	struct lpfc_sglq *sglq_entry_next = NULL;
+	struct lpfc_sglq *sglq_entry_first = NULL;
+	int status, total_cnt;
+	int post_cnt = 0, num_posted = 0, block_cnt = 0;
+	int last_xritag = NO_XRI;
+	LIST_HEAD(prep_sgl_list);
+	LIST_HEAD(blck_sgl_list);
+	LIST_HEAD(allc_sgl_list);
+	LIST_HEAD(post_sgl_list);
+	LIST_HEAD(free_sgl_list);
+
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_splice_init(sgl_list, &allc_sgl_list);
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	total_cnt = cnt;
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &allc_sgl_list, list) {
+		list_del_init(&sglq_entry->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_sgl_list, &blck_sgl_list);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&sglq_entry->list, &prep_sgl_list);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&sglq_entry->list, &prep_sgl_list);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_sgl_list,
+						 &blck_sgl_list);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posted++;
+
+		/* keep track of last sgl's xritag */
+		last_xritag = sglq_entry->sli4_xritag;
+
+		/* end of repost sgl list condition for buffers */
+		if (num_posted == total_cnt) {
+			if (post_cnt == 0) {
+				list_splice_init(&prep_sgl_list,
+						 &blck_sgl_list);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				status = lpfc_sli4_post_sgl(phba,
+						sglq_entry->phys, 0,
+						sglq_entry->sli4_xritag);
+				if (!status) {
+					/* successful, put sgl to posted list */
+					list_add_tail(&sglq_entry->list,
+						      &post_sgl_list);
+				} else {
+					/* Failure, put sgl to free list */
+					lpfc_printf_log(phba, KERN_WARNING,
+						LOG_SLI,
+						"3159 Failed to post "
+						"sgl, xritag:x%x\n",
+						sglq_entry->sli4_xritag);
+					list_add_tail(&sglq_entry->list,
+						      &free_sgl_list);
+					total_cnt--;
+				}
+			}
+		}
+
+		/* continue until a nembed page worth of sgls */
+		if (post_cnt == 0)
+			continue;
+
+		/* post the buffer list sgls as a block */
+		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
+						 post_cnt);
+
+		if (!status) {
+			/* success, put sgl list to posted sgl list */
+			list_splice_init(&blck_sgl_list, &post_sgl_list);
+		} else {
+			/* Failure, put sgl list to free sgl list */
+			sglq_entry_first = list_first_entry(&blck_sgl_list,
+							    struct lpfc_sglq,
+							    list);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3160 Failed to post sgl-list, "
+					"xritag:x%x-x%x\n",
+					sglq_entry_first->sli4_xritag,
+					(sglq_entry_first->sli4_xritag +
+					 post_cnt - 1));
+			list_splice_init(&blck_sgl_list, &free_sgl_list);
+			total_cnt -= post_cnt;
+		}
+
+		/* don't reset xritag due to hole in xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset sgl post count for next round of posting */
+		post_cnt = 0;
+	}
+
+	/* free the sgls failed to post */
+	lpfc_free_sgl_list(phba, &free_sgl_list);
+
+	/* push sgls posted to the available list */
+	if (!list_empty(&post_sgl_list)) {
+		spin_lock_irq(&phba->hbalock);
+		spin_lock(&phba->sli4_hba.sgl_list_lock);
+		list_splice_init(&post_sgl_list, sgl_list);
+		spin_unlock(&phba->sli4_hba.sgl_list_lock);
+		spin_unlock_irq(&phba->hbalock);
+	} else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3161 Failure to post sgl to port.\n");
+		return -EIO;
+	}
+
+	/* return the number of XRIs actually posted */
+	return total_cnt;
+}
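+
+/*
+ * Caller-side sketch (matches the els/nvmet call sites in
+ * lpfc_sli4_hba_setup below): on success the return value is the number
+ * of sgls actually posted, which the caller folds back into its xri
+ * count:
+ *
+ *	rc = lpfc_sli4_repost_sgl_list(phba, sgl_list, cnt);
+ *	if (rc < 0)
+ *		goto out;	(no sgl could be posted)
+ *	xri_cnt = rc;
+ */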
+
+void
+lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	uint32_t len;
+
+	len = sizeof(struct lpfc_mbx_set_host_data) -
+		sizeof(struct lpfc_sli4_cfg_mhdr);
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+			 LPFC_SLI4_MBX_EMBED);
+
+	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+	mbox->u.mqe.un.set_host_data.param_len =
+					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
+	snprintf(mbox->u.mqe.un.set_host_data.data,
+		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+		 "Linux %s v"LPFC_DRIVER_VERSION,
+		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+}
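+
+/*
+ * Example (as used in lpfc_sli4_hba_setup below): build, then issue; a
+ * failure to set the host data is logged but treated as non-fatal:
+ *
+ *	lpfc_set_host_data(phba, mboxq);
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ */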
+
+int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+		    struct lpfc_queue *drq, int count, int idx)
+{
+	int rc, i;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+	struct lpfc_rqb *rqbp;
+	struct rqb_dmabuf *rqb_buffer;
+	LIST_HEAD(rqb_buf_list);
+
+	rqbp = hrq->rqbp;
+	for (i = 0; i < count; i++) {
+		/* If the RQ is already full, don't bother */
+		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
+			break;
+		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
+		if (!rqb_buffer)
+			break;
+		rqb_buffer->hrq = hrq;
+		rqb_buffer->drq = drq;
+		rqb_buffer->idx = idx;
+		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+	}
+	while (!list_empty(&rqb_buf_list)) {
+		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+				 hbuf.list);
+
+		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+		if (rc < 0) {
+			rqbp->rqb_free_buffer(phba, rqb_buffer);
+		} else {
+			list_add_tail(&rqb_buffer->hbuf.list,
+				      &rqbp->rqb_buffer_list);
+			rqbp->buffer_count++;
+		}
+	}
+	return 1;
+}
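+
+/*
+ * Example call (taken from lpfc_sli4_hba_setup below), posting the
+ * initial buffers to one NVMET MRQ header/data pair:
+ *
+ *	lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
+ *			    phba->sli4_hba.nvmet_mrq_data[i],
+ *			    LPFC_NVMET_RQE_DEF_COUNT, i);
+ */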
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device initialization PCI function. This
+ * function is called by the HBA initialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+	int rc, i, cnt;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mqe *mqe;
+	uint8_t *vpd;
+	uint32_t vpd_size;
+	uint32_t ftr_rsp = 0;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_rqb *rqbp;
+
+	/* Perform a PCI function reset to start from clean */
+	rc = lpfc_pci_function_reset(phba);
+	if (unlikely(rc))
+		return -ENODEV;
+
+	/* Check the HBA Host Status Register for readiness */
+	rc = lpfc_sli4_post_status_check(phba);
+	if (unlikely(rc))
+		return -ENODEV;
+	else {
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/*
+	 * Allocate a single mailbox container for initializing the
+	 * port.
+	 */
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* Issue READ_REV to collect vpd and FW information. */
+	vpd_size = SLI4_PAGE_SIZE;
+	vpd = kzalloc(vpd_size, GFP_KERNEL);
+	if (!vpd) {
+		rc = -ENOMEM;
+		goto out_free_mbox;
+	}
+
+	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+	if (unlikely(rc)) {
+		kfree(vpd);
+		goto out_free_mbox;
+	}
+
+	mqe = &mboxq->u.mqe;
+	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
+	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
+		phba->hba_flag |= HBA_FCOE_MODE;
+		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
+	} else {
+		phba->hba_flag &= ~HBA_FCOE_MODE;
+	}
+
+	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+		LPFC_DCBX_CEE_MODE)
+		phba->hba_flag |= HBA_FIP_SUPPORT;
+	else
+		phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
+	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0376 READ_REV Error. SLI Level %d "
+			"FCoE enabled %d\n",
+			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+		rc = -EIO;
+		kfree(vpd);
+		goto out_free_mbox;
+	}
+
+	/*
+	 * Continue initialization with default values even if the driver failed
+	 * to read the FCoE param config regions; only read the parameters if
+	 * the board is FCoE.
+	 */
+	if (phba->hba_flag & HBA_FCOE_MODE &&
+	    lpfc_sli4_read_fcoe_params(phba))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+			"2570 Failed to read FCoE parameters\n");
+
+	/*
+	 * Retrieve the sli4 device physical port name; failure to do so
+	 * is considered non-fatal.
+	 */
+	rc = lpfc_sli4_retrieve_pport_name(phba);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"3080 Successful retrieving SLI4 device "
+				"physical port name: %s.\n", phba->Port);
+
+	/*
+	 * Evaluate the read rev and vpd data. Populate the driver
+	 * state with the results. If this routine fails, the failure
+	 * is not fatal as the driver will use generic values.
+	 */
+	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+	if (unlikely(!rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0377 Error %d parsing vpd. "
+				"Using defaults.\n", rc);
+		rc = 0;
+	}
+	kfree(vpd);
+
+	/* Save information as VPD data */
+	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
+	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
+	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
+					 &mqe->un.read_rev);
+	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
+				       &mqe->un.read_rev);
+	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
+					    &mqe->un.read_rev);
+	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
+					   &mqe->un.read_rev);
+	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
+	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
+	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
+	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
+	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
+	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0380 READ_REV Status x%x "
+			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			bf_get(lpfc_mqe_status, mqe),
+			phba->vpd.rev.opFwName,
+			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
+			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+
+	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
+	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+	if (phba->pport->cfg_lun_queue_depth > rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"3362 LUN queue depth changed from %d to %d\n",
+				phba->pport->cfg_lun_queue_depth, rc);
+		phba->pport->cfg_lun_queue_depth = rc;
+	}
+
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_0) {
+		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc == MBX_SUCCESS) {
+			phba->hba_flag |= HBA_RECOVERABLE_UE;
+			/* Set 1Sec interval to detect UE */
+			phba->eratt_poll_interval = 1;
+			phba->sli4_hba.ue_to_sr = bf_get(
+					lpfc_mbx_set_feature_UESR,
+					&mboxq->u.mqe.un.set_feature);
+			phba->sli4_hba.ue_to_rp = bf_get(
+					lpfc_mbx_set_feature_UERP,
+					&mboxq->u.mqe.un.set_feature);
+		}
+	}
+
+	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
+		/* Enable MDS Diagnostics only if the SLI Port supports it */
+		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS)
+			phba->mds_diags_support = 0;
+	}
+
+	/*
+	 * Discover the port's supported feature set and match it against the
+	 * host's requests.
+	 */
+	lpfc_request_features(phba, mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	/*
+	 * The port must support FCP initiator mode as this is the
+	 * only mode running in the host.
+	 */
+	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0378 No support for fcpi mode.\n");
+		ftr_rsp++;
+	}
+	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+	else
+		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
+	/*
+	 * If the port cannot support the host's requested features
+	 * then turn off the global config parameters to disable the
+	 * feature in the driver.  This is not a fatal error.
+	 */
+	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
+	if (phba->cfg_enable_bg) {
+		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
+			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+		else
+			ftr_rsp++;
+	}
+
+	if (phba->max_vpi && phba->cfg_enable_npiv &&
+	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+		ftr_rsp++;
+
+	if (ftr_rsp) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0379 Feature Mismatch Data: x%08x %08x "
+				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
+				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
+				phba->cfg_enable_npiv, phba->max_vpi);
+		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+			phba->cfg_enable_bg = 0;
+		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+			phba->cfg_enable_npiv = 0;
+	}
+
+	/* These SLI3 features are assumed in SLI4 */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
+	spin_unlock_irq(&phba->hbalock);
+
+	/*
+	 * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
+	 * calls depend on these resources to complete port setup.
+	 */
+	rc = lpfc_sli4_alloc_resource_identifiers(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"2920 Failed to alloc Resource IDs "
+				"rc = x%x\n", rc);
+		goto out_free_mbox;
+	}
+
+	lpfc_set_host_data(phba, mboxq);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"2134 Failed to set host os driver version %x",
+				rc);
+	}
+
+	/* Read the port's service parameters. */
+	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+	if (rc) {
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -ENOMEM;
+		goto out_free_mbox;
+	}
+
+	mboxq->vport = vport;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	if (rc == MBX_SUCCESS) {
+		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
+		rc = 0;
+	}
+
+	/*
+	 * This memory was allocated by the lpfc_read_sparam routine. Release
+	 * it to the mbuf pool.
+	 */
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mboxq->context1 = NULL;
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0382 READ_SPARAM command failed "
+				"status %d, mbxStatus x%x\n",
+				rc, bf_get(lpfc_mqe_status, mqe));
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	lpfc_update_vport_wwn(vport);
+
+	/* Update the fc_host data structures with new wwn. */
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+
+	/* Create all the SLI4 queues */
+	rc = lpfc_sli4_queue_create(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3089 Failed to allocate queues\n");
+		rc = -ENODEV;
+		goto out_free_mbox;
+	}
+	/* Set up all the queues to the device */
+	rc = lpfc_sli4_queue_setup(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0381 Error %d during queue setup.\n ", rc);
+		goto out_stop_timers;
+	}
+	/* Initialize the driver internal SLI layer lists. */
+	lpfc_sli4_setup(phba);
+	lpfc_sli4_queue_init(phba);
+
+	/* update host els xri-sgl sizes and mappings */
+	rc = lpfc_sli4_els_sgl_update(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"1400 Failed to update xri-sgl size and "
+				"mapping: %d\n", rc);
+		goto out_destroy_queue;
+	}
+
+	/* register the els sgl pool to the port */
+	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
+				       phba->sli4_hba.els_xri_cnt);
+	if (unlikely(rc < 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0582 Error %d during els sgl post "
+				"operation\n", rc);
+		rc = -ENODEV;
+		goto out_destroy_queue;
+	}
+	phba->sli4_hba.els_xri_cnt = rc;
+
+	if (phba->nvmet_support) {
+		/* update host nvmet xri-sgl sizes and mappings */
+		rc = lpfc_sli4_nvmet_sgl_update(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"6308 Failed to update nvmet-sgl size "
+					"and mapping: %d\n", rc);
+			goto out_destroy_queue;
+		}
+
+		/* register the nvmet sgl pool to the port */
+		rc = lpfc_sli4_repost_sgl_list(
+			phba,
+			&phba->sli4_hba.lpfc_nvmet_sgl_list,
+			phba->sli4_hba.nvmet_xri_cnt);
+		if (unlikely(rc < 0)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"3117 Error %d during nvmet "
+					"sgl post\n", rc);
+			rc = -ENODEV;
+			goto out_destroy_queue;
+		}
+		phba->sli4_hba.nvmet_xri_cnt = rc;
+
+		cnt = phba->cfg_iocb_cnt * 1024;
+		/* We need 1 iocbq for every SGL, for IO processing */
+		cnt += phba->sli4_hba.nvmet_xri_cnt;
+	} else {
+		/* update host scsi xri-sgl sizes and mappings */
+		rc = lpfc_sli4_scsi_sgl_update(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"6309 Failed to update scsi-sgl size "
+					"and mapping: %d\n", rc);
+			goto out_destroy_queue;
+		}
+
+		/* update host nvme xri-sgl sizes and mappings */
+		rc = lpfc_sli4_nvme_sgl_update(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"6082 Failed to update nvme-sgl size "
+					"and mapping: %d\n", rc);
+			goto out_destroy_queue;
+		}
+
+		cnt = phba->cfg_iocb_cnt * 1024;
+	}
+
+	if (!phba->sli.iocbq_lookup) {
+		/* Initialize and populate the iocb list per host */
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2821 initialize iocb list %d total %d\n",
+				phba->cfg_iocb_cnt, cnt);
+		rc = lpfc_init_iocb_list(phba, cnt);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1413 Failed to init iocb list.\n");
+			goto out_destroy_queue;
+		}
+	}
+
+	if (phba->nvmet_support)
+		lpfc_nvmet_create_targetport(phba);
+
+	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
+		/* Post initial buffers to all RQs created */
+		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
+			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
+			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
+			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
+			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
+			rqbp->buffer_count = 0;
+
+			lpfc_post_rq_buffer(
+				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
+				phba->sli4_hba.nvmet_mrq_data[i],
+				LPFC_NVMET_RQE_DEF_COUNT, i);
+		}
+	}
+
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		/* register the allocated scsi sgl pool to the port */
+		rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"0383 Error %d during scsi sgl post "
+					"operation\n", rc);
+			/* Some Scsi buffers were moved to abort scsi list */
+			/* A pci function reset will repost them */
+			rc = -ENODEV;
+			goto out_destroy_queue;
+		}
+	}
+
+	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+	    (phba->nvmet_support == 0)) {
+
+		/* register the allocated nvme sgl pool to the port */
+		rc = lpfc_repost_nvme_sgl_list(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"6116 Error %d during nvme sgl post "
+					"operation\n", rc);
+			/* Some NVME buffers were moved to abort nvme list */
+			/* A pci function reset will repost them */
+			rc = -ENODEV;
+			goto out_destroy_queue;
+		}
+	}
+
+	/* Post the rpi header region to the device. */
+	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0393 Error %d during rpi post operation\n",
+				rc);
+		rc = -ENODEV;
+		goto out_destroy_queue;
+	}
+	lpfc_sli4_node_prep(phba);
+
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
+			/*
+			 * The FC Port needs to register FCFI (index 0)
+			 */
+			lpfc_reg_fcfi(phba, mboxq);
+			mboxq->vport = phba->pport;
+			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+			if (rc != MBX_SUCCESS)
+				goto out_unset_queue;
+			rc = 0;
+			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
+						&mboxq->u.mqe.un.reg_fcfi);
+		} else {
+			/* We are a NVME Target mode with MRQ > 1 */
+
+			/* First register the FCFI */
+			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
+			mboxq->vport = phba->pport;
+			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+			if (rc != MBX_SUCCESS)
+				goto out_unset_queue;
+			rc = 0;
+			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
+						&mboxq->u.mqe.un.reg_fcfi_mrq);
+
+			/* Next register the MRQs */
+			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
+			mboxq->vport = phba->pport;
+			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+			if (rc != MBX_SUCCESS)
+				goto out_unset_queue;
+			rc = 0;
+		}
+		/* Check if the port is configured to be disabled */
+		lpfc_sli_read_link_ste(phba);
+	}
+
+	/* Arm the CQs and then EQs on device */
+	lpfc_sli4_arm_cqeq_intr(phba);
+
+	/* Indicate device interrupt mode */
+	phba->sli4_hba.intr_enable = 1;
+
+	/* Allow asynchronous mailbox command to go through */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Post receive buffers to the device */
+	lpfc_sli4_rb_setup(phba);
+
+	/* Reset HBA FCF states after HBA reset */
+	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
+
+	/* Start the ELS watchdog timer */
+	mod_timer(&vport->els_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+
+	/* Start heart beat timer */
+	mod_timer(&phba->hb_tmofunc,
+		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+	phba->hb_outstanding = 0;
+	phba->last_completion_time = jiffies;
+
+	/* Start error attention (ERATT) polling timer */
+	mod_timer(&phba->eratt_poll,
+		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+
+	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
+	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+		rc = pci_enable_pcie_error_reporting(phba->pcidev);
+		if (!rc) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2829 This device supports "
+					"Advanced Error Reporting (AER)\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag |= HBA_AER_ENABLED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2830 This device does not support "
+					"Advanced Error Reporting (AER)\n");
+			phba->cfg_aer_support = 0;
+		}
+		rc = 0;
+	}
+
+	/*
+	 * The port is ready, set the host's link state to LINK_DOWN
+	 * in preparation for link interrupts.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_LINK_DOWN;
+	spin_unlock_irq(&phba->hbalock);
+	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+	    (phba->hba_flag & LINK_DISABLED)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+				"3103 Adapter Link is disabled.\n");
+		lpfc_down_link(phba, mboxq);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+					"3104 Adapter failed to issue "
+					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
+			goto out_unset_queue;
+		}
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+		/* don't perform init_link on SLI4 FC port loopback test */
+		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+			if (rc)
+				goto out_unset_queue;
+		}
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+out_unset_queue:
+	/* Unset all the queues set up in this routine when error out */
+	lpfc_sli4_queue_unset(phba);
+out_destroy_queue:
+	lpfc_free_iocb_list(phba);
+	lpfc_sli4_queue_destroy(phba);
+out_stop_timers:
+	lpfc_stop_hba_timers(phba);
+out_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_mbox_timeout - Timeout call back function for mbox timer
+ * @ptr: context object - pointer to hba structure.
+ *
+ * This is the callback function for mailbox timer. The mailbox
+ * timer is armed when a new mailbox command is issued and the timer
+ * is deleted when the mailbox completes. The function is called by
+ * the kernel timer code when a mailbox does not complete within
+ * expected time. This function wakes up the worker thread to
+ * process the mailbox timeout and returns. All the processing is
+ * done by the worker thread function lpfc_mbox_timeout_handler.
+ **/
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
+	unsigned long iflag;
+	uint32_t tmo_posted;
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
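+
+/*
+ * Arming sketch (assumption: the timer field and interval shown here are
+ * illustrative, following the mod_timer() pattern used elsewhere in this
+ * file):
+ *
+ *	mod_timer(&psli->mbox_tmo,
+ *		  jiffies + msecs_to_jiffies(1000 * tmo_seconds));
+ */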
+
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ *                                    are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+static bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+	uint32_t idx;
+	struct lpfc_queue *mcq;
+	struct lpfc_mcqe *mcqe;
+	bool pending_completions = false;
+
+	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+		return false;
+
+	/* Check for completions on mailbox completion queue */
+
+	mcq = phba->sli4_hba.mbx_cq;
+	idx = mcq->hba_index;
+	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+			pending_completions = true;
+			break;
+		}
+		idx = (idx + 1) % mcq->entry_count;
+		if (mcq->hba_index == idx)
+			break;
+	}
+	return pending_completions;
+}
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ *					      that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For SLI4, it is possible to miss an interrupt. As such, mbox completions
+ * may be missed, causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+	uint32_t eqidx;
+	struct lpfc_queue *fpeq = NULL;
+	struct lpfc_eqe *eqe;
+	bool mbox_pending;
+
+	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+		return false;
+
+	/* Find the eq associated with the mcq */
+
+	if (phba->sli4_hba.hba_eq)
+		for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
+			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+			    phba->sli4_hba.mbx_cq->assoc_qid) {
+				fpeq = phba->sli4_hba.hba_eq[eqidx];
+				break;
+			}
+	if (!fpeq)
+		return false;
+
+	/* Turn off interrupts from this EQ */
+
+	lpfc_sli4_eq_clr_intr(fpeq);
+
+	/* Check to see if a mbox completion is pending */
+
+	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+	/*
+	 * If a mbox completion is pending, process all the events on EQ
+	 * associated with the mbox completion queue (this could include
+	 * mailbox commands, async events, els commands, receive queue data
+	 * and fcp commands)
+	 */
+
+	if (mbox_pending)
+		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+			fpeq->EQ_processed++;
+		}
+
+	/* Always clear and re-arm the EQ */
+
+	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+	return mbox_pending;
+}
+
+/**
+ * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from worker thread when a mailbox command times out.
+ * The caller is not required to hold any locks. This function will reset the
+ * HBA and recover all the pending commands.
+ **/
+void
+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
+	MAILBOX_t *mb = NULL;
+
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* If the mailbox completed, process the completion and return */
+	if (lpfc_sli4_process_missed_mbox_completions(phba))
+		return;
+
+	if (pmbox != NULL)
+		mb = &pmbox->u.mb;
+	/* Check the pmbox pointer first.  There is a race condition
+	 * between the mbox timeout handler getting executed in the
+	 * worklist and the mailbox actually completing. When this
+	 * race condition occurs, the mbox_active will be NULL.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (pmbox == NULL) {
+		lpfc_printf_log(phba, KERN_WARNING,
+				LOG_MBOX | LOG_SLI,
+				"0353 Active Mailbox cleared - mailbox timeout "
+				"exiting\n");
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* Mbox cmd <mbxCommand> timeout */
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+			mb->mbxCommand,
+			phba->pport->port_state,
+			phba->sli.sli_flag,
+			phba->sli.mbox_active);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Set the link state to unknown so lpfc_sli_abort_iocb_ring
+	 * gets IOCB_ERROR from lpfc_sli_issue_iocb, allowing it to
+	 * fail all outstanding SCSI IO.
+	 */
+	spin_lock_irq(&phba->pport->work_port_lock);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irq(&phba->pport->work_port_lock);
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_LINK_UNKNOWN;
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_sli_abort_fcp_rings(phba);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0345 Resetting board due to mailbox timeout\n");
+
+	/* Reset the HBA device */
+	lpfc_reset_hba(phba);
+}
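+
+/*
+ * The WORKER_MBOX_TMO event posted above is consumed by the driver worker
+ * thread, conceptually along these lines (the real dispatch lives in
+ * lpfc_work_done() and may differ in detail):
+ *
+ *	if (phba->pport->work_port_events & WORKER_MBOX_TMO)
+ *		lpfc_mbox_timeout_handler(phba);
+ */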
+
+/**
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
+ * The mailbox command can be submitted in polling mode, in which case
+ * this function will wait in a polling loop for the completion of the
+ * mailbox.
+ * If the mailbox is submitted in no_wait mode (not polling), the
+ * function will submit the command and return immediately without waiting
+ * for the mailbox completion. The no_wait mode is supported only when the
+ * HBA is in SLI2/SLI3 mode - interrupts are enabled.
+ * The SLI interface allows only one mailbox pending at a time. If the
+ * mailbox is issued in polling mode and there is already a mailbox
+ * pending, then the function will return an error. If the mailbox is issued
+ * in NO_WAIT mode and there is a mailbox pending already, the function
+ * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
+ * The SLI layer owns the mailbox object until the mailbox command
+ * completes if this function returns MBX_BUSY or MBX_SUCCESS. For all
+ * other return codes, the caller owns the mailbox command after the
+ * function returns.
+ **/
+static int
+lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
+		       uint32_t flag)
+{
+	MAILBOX_t *mbx;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t status, evtctr;
+	uint32_t ha_copy, hc_copy;
+	int i;
+	unsigned long timeout;
+	unsigned long drvr_flag = 0;
+	uint32_t word0, ldata;
+	void __iomem *to_slim;
+	int processing_queue = 0;
+
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	if (!pmbox) {
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		/* processing mbox queue from intr_handler */
+		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			return MBX_SUCCESS;
+		}
+		processing_queue = 1;
+		pmbox = lpfc_mbox_get(phba);
+		if (!pmbox) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			return MBX_SUCCESS;
+		}
+	}
+
+	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+		if (!pmbox->vport) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_VPORT,
+					"1806 Mbox x%x failed. No vport\n",
+					pmbox->u.mb.mbxCommand);
+			dump_stack();
+			goto out_not_finished;
+		}
+	}
+
+	/* If the PCI channel is in offline state, do not post mbox. */
+	if (unlikely(pci_channel_offline(phba->pcidev))) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+		goto out_not_finished;
+	}
+
+	/* If HBA has a deferred error attention, fail the iocb. */
+	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+		goto out_not_finished;
+	}
+
+	psli = &phba->sli;
+
+	mbx = &pmbox->u.mb;
+	status = MBX_SUCCESS;
+
+	if (phba->link_state == LPFC_HBA_ERROR) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+		/* Mbox command <mbxCommand> cannot issue */
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):0311 Mailbox command x%x cannot "
+				"issue Data: x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0,
+				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+			!(hc_copy & HC_MBINT_ENA)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2528 Mailbox command x%x cannot "
+				"issue Data: x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0,
+				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+	}
+
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		/* Polling for a mbox command when another one is already active
+		 * is not allowed in SLI. Also, the driver must have established
+		 * SLI2 mode to queue and process multiple mbox commands.
+		 */
+
+		if (flag & MBX_POLL) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+			/* Mbox command <mbxCommand> cannot issue */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2529 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+
+		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			/* Mbox command <mbxCommand> cannot issue */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2530 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+
+		/* Another mailbox command is still being processed, queue this
+		 * command to be processed later.
+		 */
+		lpfc_mbox_put(phba, pmbox);
+
+		/* Mbox cmd issue - BUSY */
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"(%d):0308 Mbox cmd issue - BUSY Data: "
+				"x%x x%x x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
+				mbx->mbxCommand,
+				phba->pport ? phba->pport->port_state : 0xff,
+				psli->sli_flag, flag);
+
+		psli->slistat.mbox_busy++;
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+		if (pmbox->vport) {
+			lpfc_debugfs_disc_trc(pmbox->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
+				(uint32_t)mbx->mbxCommand,
+				mbx->un.varWords[0], mbx->un.varWords[1]);
+		} else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
+				(uint32_t)mbx->mbxCommand,
+				mbx->un.varWords[0], mbx->un.varWords[1]);
+		}
+
+		return MBX_BUSY;
+	}
+
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+	/* If we are not polling, we MUST be in SLI2 mode */
+	if (flag != MBX_POLL) {
+		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
+		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
+			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			/* Mbox command <mbxCommand> cannot issue */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2531 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+		/* timeout active mbox command */
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+					   1000);
+		mod_timer(&psli->mbox_tmo, jiffies + timeout);
+	}
+
+	/* Mailbox cmd <cmd> issue */
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
+			"x%x\n",
+			pmbox->vport ? pmbox->vport->vpi : 0,
+			mbx->mbxCommand,
+			phba->pport ? phba->pport->port_state : 0xff,
+			psli->sli_flag, flag);
+
+	if (mbx->mbxCommand != MBX_HEARTBEAT) {
+		if (pmbox->vport) {
+			lpfc_debugfs_disc_trc(pmbox->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Send vport: cmd:x%x mb:x%x x%x",
+				(uint32_t)mbx->mbxCommand,
+				mbx->un.varWords[0], mbx->un.varWords[1]);
+		} else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Send:       cmd:x%x mb:x%x x%x",
+				(uint32_t)mbx->mbxCommand,
+				mbx->un.varWords[0], mbx->un.varWords[1]);
+		}
+	}
+
+	psli->slistat.mbox_cmd++;
+	evtctr = psli->slistat.mbox_event;
+
+	/* next set own bit for the adapter and copy over command word */
+	mbx->mbxOwner = OWN_CHIP;
+
+	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
+				= (uint8_t *)phba->mbox_ext
+				  - (uint8_t *)phba->mbox;
+		}
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_sli_pcimem_bcopy(pmbox->context2,
+				(uint8_t *)phba->mbox_ext,
+				pmbox->in_ext_byte_len);
+		}
+		/* Copy command data to host SLIM area */
+		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
+	} else {
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
+				= MAILBOX_HBA_EXT_OFFSET;
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2)
+			lpfc_memcpy_to_slim(phba->MBslimaddr +
+				MAILBOX_HBA_EXT_OFFSET,
+				pmbox->context2, pmbox->in_ext_byte_len);
+
+		if (mbx->mbxCommand == MBX_CONFIG_PORT)
+			/* copy command data into host mbox for cmpl */
+			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
+					      MAILBOX_CMD_SIZE);
+
+		/* First copy mbox command data to HBA SLIM, skip past first
+		 * word
+		 */
+		to_slim = phba->MBslimaddr + sizeof (uint32_t);
+		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
+			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
+
+		/* Next copy over first word, with mbxOwner set */
+		ldata = *((uint32_t *)mbx);
+		to_slim = phba->MBslimaddr;
+		writel(ldata, to_slim);
+		readl(to_slim); /* flush */
+
+		if (mbx->mbxCommand == MBX_CONFIG_PORT)
+			/* switch over to host mailbox */
+			psli->sli_flag |= LPFC_SLI_ACTIVE;
+	}
+
+	wmb();
+
+	switch (flag) {
+	case MBX_NOWAIT:
+		/* Set up reference to mailbox command */
+		psli->mbox_active = pmbox;
+		/* Interrupt board to do it */
+		writel(CA_MBATT, phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+		/* Don't wait for it to finish, just return */
+		break;
+
+	case MBX_POLL:
+		/* Set up null reference to mailbox command */
+		psli->mbox_active = NULL;
+		/* Interrupt board to do it */
+		writel(CA_MBATT, phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+
+		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+			/* First read mbox status word */
+			word0 = *((uint32_t *)phba->mbox);
+			word0 = le32_to_cpu(word0);
+		} else {
+			/* First read mbox status word */
+			if (lpfc_readl(phba->MBslimaddr, &word0)) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
+		}
+
+		/* Read the HBA Host Attention Register */
+		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+			spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+			goto out_not_finished;
+		}
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+							1000) + jiffies;
+		i = 0;
+		/* Wait for command to complete */
+		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
+		       (!(ha_copy & HA_MBATT) &&
+			(phba->link_state > LPFC_WARM_START))) {
+			if (time_after(jiffies, timeout)) {
+				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
+
+			/* Check if we took a mbox interrupt while we were
+			 * polling
+			 */
+			if (((word0 & OWN_CHIP) != OWN_CHIP)
+			    && (evtctr != psli->slistat.mbox_event))
+				break;
+
+			if (i++ > 10) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				msleep(1);
+				spin_lock_irqsave(&phba->hbalock, drvr_flag);
+			}
+
+			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+				/* First copy command data */
+				word0 = *((uint32_t *)phba->mbox);
+				word0 = le32_to_cpu(word0);
+				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
+					MAILBOX_t *slimmb;
+					uint32_t slimword0;
+					/* Check real SLIM for any errors */
+					slimword0 = readl(phba->MBslimaddr);
+					slimmb = (MAILBOX_t *)&slimword0;
+					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
+					    && slimmb->mbxStatus) {
+						psli->sli_flag &=
+						    ~LPFC_SLI_ACTIVE;
+						word0 = slimword0;
+					}
+				}
+			} else {
+				/* First copy command data */
+				word0 = readl(phba->MBslimaddr);
+			}
+			/* Read the HBA Host Attention Register */
+			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
+		}
+
+		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+			/* copy results back to user */
+			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
+						MAILBOX_CMD_SIZE);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+						      pmbox->context2,
+						      pmbox->out_ext_byte_len);
+			}
+		} else {
+			/* First copy command data */
+			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
+						MAILBOX_CMD_SIZE);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_memcpy_from_slim(pmbox->context2,
+					phba->MBslimaddr +
+					MAILBOX_HBA_EXT_OFFSET,
+					pmbox->out_ext_byte_len);
+			}
+		}
+
+		writel(HA_MBATT, phba->HAregaddr);
+		readl(phba->HAregaddr); /* flush */
+
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		status = mbx->mbxStatus;
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+	return status;
+
+out_not_finished:
+	if (processing_queue) {
+		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		lpfc_mbox_cmpl_put(phba, pmbox);
+	}
+	return MBX_NOT_FINISHED;
+}
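+
+/*
+ * Caller-side sketch of the ownership rules documented above (illustrative
+ * only; lpfc_read_rev() and the mempool are existing driver APIs, error
+ * handling is condensed):
+ *
+ *	LPFC_MBOXQ_t *mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	if (!mb)
+ *		return -ENOMEM;
+ *	lpfc_read_rev(phba, mb);
+ *	rc = lpfc_sli_issue_mbox(phba, mb, MBX_POLL);
+ *	... consume mb->u.mb results on MBX_SUCCESS ...
+ *	if (rc != MBX_TIMEOUT)
+ *		mempool_free(mb, phba->mbox_mem_pool);
+ *
+ * Not freeing on MBX_TIMEOUT follows the driver-wide idiom: a timed-out
+ * mailbox may still be completed later by the SLI layer.
+ */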
+
+/**
+ * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function blocks the posting of SLI4 asynchronous mailbox commands from
+ * the driver internal pending mailbox queue. It will then try to wait out
+ * any possible outstanding mailbox command before returning.
+ *
+ * Returns:
+ *	0 - the outstanding mailbox command completed.
+ *	1 - the wait for the outstanding mailbox command timed out.
+ **/
+static int
+lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	int rc = 0;
+	unsigned long timeout = 0;
+
+	/* Mark the asynchronous mailbox command posting as blocked */
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	/* Determine how long we might wait for the active mailbox
+	 * command to be gracefully completed by firmware.
+	 */
+	if (phba->sli.mbox_active)
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+						phba->sli.mbox_active) *
+						1000) + jiffies;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Make sure the mailbox is really active */
+	if (timeout)
+		lpfc_sli4_process_missed_mbox_completions(phba);
+
+	/* Wait for the outstanding mailbox command to complete */
+	while (phba->sli.mbox_active) {
+		/* Check active mailbox complete status every 2ms */
+		msleep(2);
+		if (time_after(jiffies, timeout)) {
+			/* Timeout, mark the outstanding cmd as not complete */
+			rc = 1;
+			break;
+		}
+	}
+
+	/* Cannot cleanly block the async mailbox command, fail it */
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function unblocks and resumes the posting of SLI4 asynchronous mailbox
+ * commands from the driver internal pending mailbox queue. It makes sure
+ * that there is no outstanding mailbox command before resuming posting
+ * asynchronous mailbox commands. If, for any reason, there is an outstanding
+ * mailbox command, it will try to wait it out before resuming asynchronous
+ * mailbox command posting.
+ **/
+static void
+lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	spin_lock_irq(&phba->hbalock);
+	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+		/* Asynchronous mailbox posting is not blocked, do nothing */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* The outstanding synchronous mailbox command is guaranteed to be
+	 * done, whether it succeeded or timed out; after a timeout the
+	 * outstanding mailbox command is always removed, so just unblock
+	 * async mailbox command posting and resume.
+	 */
+	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* wake up worker thread to post asynchronous mailbox command */
+	lpfc_worker_wake_up(phba);
+}
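+
+/*
+ * Block/unblock bracket a synchronous post while interrupts are enabled;
+ * a condensed sketch of the pairing (mirroring the MBX_POLL path in
+ * lpfc_sli_issue_mbox_s4() below):
+ *
+ *	if (!lpfc_sli4_async_mbox_block(phba)) {
+ *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ *		lpfc_sli4_async_mbox_unblock(phba);
+ *	}
+ */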
+
+/**
+ * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function waits for the bootstrap mailbox register ready bit from
+ * the port for twice the regular mailbox command timeout value.
+ *
+ * Returns:
+ *      0 - no timeout on waiting for bootstrap mailbox register ready.
+ *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
+ **/
+static int
+lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	uint32_t db_ready;
+	unsigned long timeout;
+	struct lpfc_register bmbx_reg;
+
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
+				   * 1000) + jiffies;
+
+	do {
+		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+		if (!db_ready)
+			msleep(2);
+
+		if (time_after(jiffies, timeout))
+			return MBXERR_ERROR;
+	} while (!db_ready);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function posts a mailbox to the port.  The mailbox is expected
+ * to be completely filled in and ready for the port to operate on it.
+ * This routine executes a synchronous completion operation on the
+ * mailbox by polling for its completion.
+ *
+ * The caller must not be holding any locks when calling this routine.
+ *
+ * Returns:
+ *	MBX_SUCCESS - mailbox posted successfully
+ *	Any of the MBX error values.
+ **/
+static int
+lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc = MBX_SUCCESS;
+	unsigned long iflag;
+	uint32_t mcqe_status;
+	uint32_t mbx_cmnd;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_mqe *mb = &mboxq->u.mqe;
+	struct lpfc_bmbx_create *mbox_rgn;
+	struct dma_address *dma_address;
+
+	/*
+	 * Only one mailbox can be active to the bootstrap mailbox region
+	 * at a time and there is no queueing provided.
+	 */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2532 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, MBX_POLL);
+		return MBXERR_ERROR;
+	}
+	/* The server grabs the token and owns it until release */
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = mboxq;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* wait for bootstrap mbox register for readiness */
+	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+	if (rc)
+		goto exit;
+
+	/*
+	 * Initialize the bootstrap memory region to avoid stale data areas
+	 * in the mailbox post.  Then copy the caller's mailbox contents to
+	 * the bmbx mailbox region.
+	 */
+	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
+	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+			      sizeof(struct lpfc_mqe));
+
+	/* Post the high mailbox dma address to the port and wait for ready. */
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
+
+	/* wait for bootstrap mbox register for hi-address write done */
+	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+	if (rc)
+		goto exit;
+
+	/* Post the low mailbox dma address to the port. */
+	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
+
+	/* wait for bootstrap mbox register for low address write done */
+	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
+	if (rc)
+		goto exit;
+
+	/*
+	 * Read the CQ to ensure the mailbox has completed.
+	 * If so, update the mailbox status so that the upper layers
+	 * can complete the request normally.
+	 */
+	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+			      sizeof(struct lpfc_mqe));
+	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
+	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+			      sizeof(struct lpfc_mcqe));
+	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
+	/*
+	 * When the CQE status indicates a failure and the mailbox status
+	 * indicates success then copy the CQE status into the mailbox status
+	 * (and prefix it with x4000).
+	 */
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+			bf_set(lpfc_mqe_status, mb,
+			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
+		rc = MBXERR_ERROR;
+	} else
+		lpfc_sli4_swap_str(phba, mboxq);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
+			" x%x x%x CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+			bf_get(lpfc_mqe_status, mb),
+			mb->un.mb_words[0], mb->un.mb_words[1],
+			mb->un.mb_words[2], mb->un.mb_words[3],
+			mb->un.mb_words[4], mb->un.mb_words[5],
+			mb->un.mb_words[6], mb->un.mb_words[7],
+			mb->un.mb_words[8], mb->un.mb_words[9],
+			mb->un.mb_words[10], mb->un.mb_words[11],
+			mb->un.mb_words[12], mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
+exit:
+	/* We are holding the token; release it and clear the active mailbox */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rc;
+}
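+
+/*
+ * The bootstrap (BMBX) handshake above reduces to the following sequence;
+ * this is a summary of the code, not an additional API:
+ *
+ *	wait for lpfc_bmbx_rdy
+ *	writel(dma_address->addr_hi, BMBXregaddr); wait for lpfc_bmbx_rdy
+ *	writel(dma_address->addr_lo, BMBXregaddr); wait for lpfc_bmbx_rdy
+ *	copy the MQE back from bmbx.avirt and check the trailing MCQE status
+ */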
+
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * Return codes: the caller owns the mailbox command after the return of
+ * the function.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+		       uint32_t flag)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	unsigned long iflags;
+	int rc;
+
+	/* dump from issue mailbox command if setup */
+	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
+
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2544 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	/* Detect polling mode and jump to a handler */
+	if (!phba->sli4_hba.intr_enable) {
+		if (flag == MBX_POLL)
+			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+		else
+			rc = -EIO;
+		if (rc != MBX_SUCCESS)
+			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+					"(%d):2541 Mailbox command x%x "
+					"(x%x/x%x) failure: "
+					"mqe_sta: x%x mcqe_sta: x%x/x%x "
+					"Data: x%x x%x\n,",
+					mboxq->vport ? mboxq->vport->vpi : 0,
+					mboxq->u.mb.mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									mboxq),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									mboxq),
+					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+					bf_get(lpfc_mcqe_ext_status,
+					       &mboxq->mcqe),
+					psli->sli_flag, flag);
+		return rc;
+	} else if (flag == MBX_POLL) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"(%d):2542 Try to issue mailbox command "
+				"x%x (x%x/x%x) synchronously ahead of async"
+				"mailbox command queue: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		/* Try to block the asynchronous mailbox posting */
+		rc = lpfc_sli4_async_mbox_block(phba);
+		if (!rc) {
+			/* Successfully blocked, now issue sync mbox cmd */
+			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+			if (rc != MBX_SUCCESS)
+				lpfc_printf_log(phba, KERN_WARNING,
+					LOG_MBOX | LOG_SLI,
+					"(%d):2597 Sync Mailbox command "
+					"x%x (x%x/x%x) failure: "
+					"mqe_sta: x%x mcqe_sta: x%x/x%x "
+					"Data: x%x x%x\n,",
+					mboxq->vport ? mboxq->vport->vpi : 0,
+					mboxq->u.mb.mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									mboxq),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									mboxq),
+					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+					bf_get(lpfc_mcqe_ext_status,
+					       &mboxq->mcqe),
+					psli->sli_flag, flag);
+			/* Unblock the async mailbox posting afterward */
+			lpfc_sli4_async_mbox_unblock(phba);
+		}
+		return rc;
+	}
+
+	/* Now, interrupt mode asynchronous mailbox command */
+	rc = lpfc_mbox_cmd_check(phba, mboxq);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2543 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	/* Put the mailbox command into the driver internal FIFO */
+	psli->slistat.mbox_busy++;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	lpfc_mbox_put(phba, mboxq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0354 Mbox cmd issue - Enqueue Data: "
+			"x%x (x%x/x%x) x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
+			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+			phba->pport->port_state,
+			psli->sli_flag, MBX_NOWAIT);
+	/* Wake up worker thread to transport mailbox command from head */
+	lpfc_worker_wake_up(phba);
+
+	return MBX_BUSY;
+
+out_not_finished:
+	return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by the worker thread to send a mailbox command to
+ * the SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mboxq;
+	int rc = MBX_SUCCESS;
+	unsigned long iflags;
+	struct lpfc_mqe *mqe;
+	uint32_t mbx_cmnd;
+
+	/* Check interrupt mode before posting the async mailbox command */
+	if (unlikely(!phba->sli4_hba.intr_enable))
+		return MBX_NOT_FINISHED;
+
+	/* Check for mailbox command service token */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_NOT_FINISHED;
+	}
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_NOT_FINISHED;
+	}
+	if (unlikely(phba->sli.mbox_active)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0384 There is pending active mailbox cmd\n");
+		return MBX_NOT_FINISHED;
+	}
+	/* Take the mailbox command service token */
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+	/* Get the next mailbox command from head of queue */
+	mboxq = lpfc_mbox_get(phba);
+
+	/* If no more mailbox commands are waiting to be posted, we're done */
+	if (!mboxq) {
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_SUCCESS;
+	}
+	phba->sli.mbox_active = mboxq;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	/* Check device readiness for posting mailbox command */
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc))
+		/* Driver clean routine will clean up pending mailbox */
+		goto out_not_finished;
+
+	/* Prepare the mbox command to be posted */
+	mqe = &mboxq->u.mqe;
+	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+	/* Start timer for the mbox_tmo and log some mailbox post messages */
+	mod_timer(&psli->mbox_tmo, (jiffies +
+		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
+			"x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+			phba->pport->port_state, psli->sli_flag);
+
+	if (mbx_cmnd != MBX_HEARTBEAT) {
+		if (mboxq->vport) {
+			lpfc_debugfs_disc_trc(mboxq->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Send vport: cmd:x%x mb:x%x x%x",
+				mbx_cmnd, mqe->un.mb_words[0],
+				mqe->un.mb_words[1]);
+		} else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Send: cmd:x%x mb:x%x x%x",
+				mbx_cmnd, mqe->un.mb_words[0],
+				mqe->un.mb_words[1]);
+		}
+	}
+	psli->slistat.mbox_cmd++;
+
+	/* Post the mailbox command to the port */
+	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2533 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, MBX_NOWAIT);
+		goto out_not_finished;
+	}
+
+	return rc;
+
+out_not_finished:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (phba->sli.mbox_active) {
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		/* Release the token */
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
+ * the API jump table function pointer in the lpfc_hba struct.
+ *
+ * Return codes: the caller owns the mailbox command after the return of
+ * the function.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
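+
+/*
+ * With the jump table set up below, callers issue mailboxes
+ * revision-independently (sketch):
+ *
+ *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ *
+ * which dispatches to lpfc_sli_issue_mbox_s3() on SLI-3 parts and to
+ * lpfc_sli_issue_mbox_s4() on SLI-4 parts.
+ */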
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+		phba->lpfc_sli_handle_slow_ring_event =
+				lpfc_sli_handle_slow_ring_event_s3;
+		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+		phba->lpfc_sli_handle_slow_ring_event =
+				lpfc_sli_handle_slow_ring_event_s4;
+		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1420 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+	}
+	return 0;
+}
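+
+/*
+ * Minimal consumption sketch for probe-time API setup (the surrounding
+ * init code is assumed and not part of this hunk):
+ *
+ *	if (lpfc_mbox_api_table_setup(phba, dev_grp))
+ *		return -ENODEV;
+ */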
+
+/**
+ * __lpfc_sli_ringtx_put - Add an iocb to the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held to add a command
+ * iocb to the txq when SLI layer cannot submit the command iocb
+ * to the ring.
+ **/
+void
+__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		    struct lpfc_iocbq *piocb)
+{
+	lockdep_assert_held(&phba->hbalock);
+	/* Insert the caller's iocb in the txq tail for later processing. */
+	list_add_tail(&piocb->list, &pring->txq);
+}
+
+/**
+ * lpfc_sli_next_iocb - Get the next iocb in the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held before a new
+ * iocb is submitted to the firmware. It checks the txq so that
+ * iocbs queued there are flushed to the firmware before any new
+ * iocb is submitted.
+ * If there are iocbs in the txq which need to be submitted
+ * to the firmware, lpfc_sli_next_iocb returns the first element
+ * of the txq after dequeuing it from the txq.
+ * If there is no iocb in the txq, the function returns
+ * *piocb and sets *piocb to NULL. The caller checks
+ * *piocb to find out whether there are more commands to issue.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		   struct lpfc_iocbq **piocb)
+{
+	struct lpfc_iocbq *nextiocb;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	nextiocb = lpfc_sli_ringtx_get(phba, pring);
+	if (!nextiocb) {
+		nextiocb = *piocb;
+		*piocb = NULL;
+	}
+
+	return nextiocb;
+}
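+
+/*
+ * The intended consumption pattern for lpfc_sli_next_iocb() is the drain
+ * loop in __lpfc_sli_issue_iocb_s3() below, condensed here:
+ *
+ *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+ *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+ *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+ *
+ * The txq drains first; the caller's piocb is handed out last, after which
+ * piocb is NULL and the loop stops when slots or iocbs run out.
+ */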
+
+/**
+ * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
+ * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR. When the link is down,
+ * this function allows only iocbs for posting buffers. This function finds
+ * next available slot in the command ring and posts the command to the
+ * available slot and writes the port attention register to request HBA start
+ * processing new iocb. If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
+ * the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to the firmware or after adding it
+ * to the txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_iocbq *nextiocb;
+	IOCB_t *iocb;
+	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
+
+	lockdep_assert_held(&phba->hbalock);
+
+	if (piocb->iocb_cmpl && (!piocb->vport) &&
+	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+		lpfc_printf_log(phba, KERN_ERR,
+				LOG_SLI | LOG_VPORT,
+				"1807 IOCB x%x failed. No vport\n",
+				piocb->iocb.ulpCommand);
+		dump_stack();
+		return IOCB_ERROR;
+	}
+
+	/* If the PCI channel is in offline state, do not post iocbs. */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return IOCB_ERROR;
+
+	/* If HBA has a deferred error attention, fail the iocb. */
+	if (unlikely(phba->hba_flag & DEFER_ERATT))
+		return IOCB_ERROR;
+
+	/*
+	 * We should never get an IOCB if we are in a < LINK_DOWN state
+	 */
+	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		return IOCB_ERROR;
+
+	/*
+	 * Check to see if we are blocking IOCB processing because of an
+	 * outstanding event.
+	 */
+	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
+		goto iocb_busy;
+
+	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
+		/*
+		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
+		 * can be issued if the link is not up.
+		 */
+		switch (piocb->iocb.ulpCommand) {
+		case CMD_GEN_REQUEST64_CR:
+		case CMD_GEN_REQUEST64_CX:
+			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
+				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
+					FC_RCTL_DD_UNSOL_CMD) ||
+				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
+					MENLO_TRANSPORT_TYPE))
+
+				goto iocb_busy;
+			break;
+		case CMD_QUE_RING_BUF_CN:
+		case CMD_QUE_RING_BUF64_CN:
+			/*
+			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
+			 * completion, iocb_cmpl MUST be 0.
+			 */
+			if (piocb->iocb_cmpl)
+				piocb->iocb_cmpl = NULL;
+			/*FALLTHROUGH*/
+		case CMD_CREATE_XRI_CR:
+		case CMD_CLOSE_XRI_CN:
+		case CMD_CLOSE_XRI_CX:
+			break;
+		default:
+			goto iocb_busy;
+		}
+
+	/*
+	 * For FCP commands, we must be in a state where we can process link
+	 * attention events.
+	 */
+	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
+			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
+		goto iocb_busy;
+	}
+
+	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+	if (iocb)
+		lpfc_sli_update_ring(phba, pring);
+	else
+		lpfc_sli_update_full_ring(phba, pring);
+
+	if (!piocb)
+		return IOCB_SUCCESS;
+
+	goto out_busy;
+
+ iocb_busy:
+	pring->stats.iocb_cmd_delay++;
+
+ out_busy:
+
+	if (!(flag & SLI_IOCB_RET_IOCB)) {
+		__lpfc_sli_ringtx_put(phba, pring, piocb);
+		return IOCB_SUCCESS;
+	}
+
+	return IOCB_BUSY;
+}
+
+/**
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the IOCB contains a single
+ * BDE then it is converted to a single sli_sge.
+ * The IOCB is still in CPU endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+ **/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+		struct lpfc_sglq *sglq)
+{
+	uint16_t xritag = NO_XRI;
+	struct ulp_bde64 *bpl = NULL;
+	struct ulp_bde64 bde;
+	struct sli4_sge *sgl  = NULL;
+	struct lpfc_dmabuf *dmabuf;
+	IOCB_t *icmd;
+	int numBdes = 0;
+	int i = 0;
+	uint32_t offset = 0; /* accumulated offset in the sg request list */
+	int inbound = 0; /* number of sg reply entries inbound from firmware */
+
+	if (!piocbq || !sglq)
+		return xritag;
+
+	sgl  = (struct sli4_sge *)sglq->sgl;
+	icmd = &piocbq->iocb;
+	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+		return sglq->sli4_xritag;
+	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = icmd->un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
+		/* The addrHigh and addrLow fields within the IOCB
+		 * have not been byteswapped yet so there is no
+		 * need to swap them back.
+		 */
+		if (piocbq->context3)
+			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+		else
+			return xritag;
+
+		bpl  = (struct ulp_bde64 *)dmabuf->virt;
+		if (!bpl)
+			return xritag;
+
+		for (i = 0; i < numBdes; i++) {
+			/* Should already be byte swapped. */
+			sgl->addr_hi = bpl->addrHigh;
+			sgl->addr_lo = bpl->addrLow;
+
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			if ((i+1) == numBdes)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w = le32_to_cpu(bpl->tus.w);
+			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+			/* The offsets in the sgl need to be accumulated
+			 * separately for the request and reply lists.
+			 * The request is always first, the reply follows.
+			 */
+			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+				/* add up the reply sg entries */
+				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+					inbound++;
+				/* first inbound? reset the offset */
+				if (inbound == 1)
+					offset = 0;
+				bf_set(lpfc_sli4_sge_offset, sgl, offset);
+				bf_set(lpfc_sli4_sge_type, sgl,
+					LPFC_SGE_TYPE_DATA);
+				offset += bde.tus.f.bdeSize;
+			}
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			bpl++;
+			sgl++;
+		}
+	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+			/* The addrHigh and addrLow fields of the BDE have not
+			 * been byteswapped yet so they need to be swapped
+			 * before putting them in the sgl.
+			 */
+			sgl->addr_hi =
+				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+			sgl->addr_lo =
+				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len =
+				cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
+	}
+	return sglq->sli4_xritag;
+}
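+
+/*
+ * Illustrative picture of the conversion above for a hypothetical
+ * two-entry BPL (sizes are made up):
+ *
+ *	BPL[0] {addr, bdeSize=1024} -> SGE[0] {addr, sge_len=1024, last=0}
+ *	BPL[1] {addr, bdeSize=512}  -> SGE[1] {addr, sge_len=512,  last=1}
+ *
+ * Only the size word is byte-swapped to CPU order on the way through; the
+ * address words are copied as-is because they were never swapped.
+ */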
+
+/**
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
+ *
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
+ *
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
+ **/
+static int
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+		union lpfc_wqe *wqe)
+{
+	uint32_t xmit_len = 0, total_len = 0;
+	uint8_t ct = 0;
+	uint32_t fip;
+	uint32_t abort_tag;
+	uint8_t command_type = ELS_COMMAND_NON_FIP;
+	uint8_t cmnd;
+	uint16_t xritag;
+	uint16_t abrt_iotag;
+	struct lpfc_iocbq *abrtiocbq;
+	struct ulp_bde64 *bpl = NULL;
+	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
+	int numBdes, i;
+	struct ulp_bde64 bde;
+	struct lpfc_nodelist *ndlp;
+	uint32_t *pcmd;
+	uint32_t if_type;
+
+	fip = phba->hba_flag & HBA_FIP_SUPPORT;
+	/* The fcp commands will set command type */
+	if (iocbq->iocb_flag &  LPFC_IO_FCP)
+		command_type = FCP_COMMAND;
+	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
+		command_type = ELS_COMMAND_FIP;
+	else
+		command_type = ELS_COMMAND_NON_FIP;
+
+	if (phba->fcp_embed_io)
+		memset(wqe, 0, sizeof(union lpfc_wqe128));
+	/* Some of the fields are in the right position already */
+	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+	if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
+		/* The ct field has moved so reset */
+		wqe->generic.wqe_com.word7 = 0;
+		wqe->generic.wqe_com.word10 = 0;
+	}
+
+	abort_tag = (uint32_t) iocbq->iotag;
+	xritag = iocbq->sli4_xritag;
+	/* words0-2 bpl convert bde */
+	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
+		bpl  = (struct ulp_bde64 *)
+			((struct lpfc_dmabuf *)iocbq->context3)->virt;
+		if (!bpl)
+			return IOCB_ERROR;
+
+		/* Should already be byte swapped. */
+		wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
+		wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
+		/* swap the size field back to the cpu so we
+		 * can assign it to the sgl.
+		 */
+		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
+		xmit_len = wqe->generic.bde.tus.f.bdeSize;
+		total_len = 0;
+		for (i = 0; i < numBdes; i++) {
+			bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
+			total_len += bde.tus.f.bdeSize;
+		}
+	} else
+		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+
+	iocbq->iocb.ulpIoTag = iocbq->iotag;
+	cmnd = iocbq->iocb.ulpCommand;
+
+	switch (iocbq->iocb.ulpCommand) {
+	case CMD_ELS_REQUEST64_CR:
+		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
+			ndlp = iocbq->context_un.ndlp;
+		else
+			ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		if (!iocbq->iocb.ulpLe) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2007 Only Limited Edition cmd Format"
+				" supported 0x%x\n",
+				iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+
+		wqe->els_req.payload_len = xmit_len;
+		/* Els_request64 has a TMO */
+		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+			iocbq->iocb.ulpTimeout);
+		/* Need a VF for word 4 set the vf bit*/
+		bf_set(els_req64_vf, &wqe->els_req, 0);
+		/* And a VFID for word 12 */
+		bf_set(els_req64_vfid, &wqe->els_req, 0);
+		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+		       iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
+		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+		if (command_type == ELS_COMMAND_FIP)
+			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
+					>> LPFC_FIP_ELS_ID_SHIFT);
+		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+					iocbq->context2)->virt);
+		if_type = bf_get(lpfc_sli_intf_if_type,
+					&phba->sli4_hba.sli_intf);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+				*pcmd == ELS_CMD_SCR ||
+				*pcmd == ELS_CMD_FDISC ||
+				*pcmd == ELS_CMD_LOGO ||
+				*pcmd == ELS_CMD_PLOGI)) {
+				bf_set(els_req64_sp, &wqe->els_req, 1);
+				bf_set(els_req64_sid, &wqe->els_req,
+					iocbq->vport->fc_myDID);
+				if ((*pcmd == ELS_CMD_FLOGI) &&
+					!(phba->fc_topology ==
+						LPFC_TOPOLOGY_LOOP))
+					bf_set(els_req64_sid, &wqe->els_req, 0);
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+					phba->vpi_ids[iocbq->vport->vpi]);
+			} else if (pcmd && iocbq->context1) {
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+			}
+		}
+		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+		wqe->els_req.max_response_payload_len = total_len - xmit_len;
+		break;
+	case CMD_XMIT_SEQUENCE64_CX:
+		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+		       iocbq->iocb.un.ulpWord[3]);
+		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+		       iocbq->iocb.unsli3.rcvsli3.ox_id);
+		/* The entire sequence is transmitted for this IOCB */
+		xmit_len = total_len;
+		cmnd = CMD_XMIT_SEQUENCE64_CR;
+		if (phba->link_flag & LS_LOOPBACK_MODE)
+			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
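+		/* fall through */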
+	case CMD_XMIT_SEQUENCE64_CR:
+		/* word3 iocb=io_tag32 wqe=reserved */
+		wqe->xmit_sequence.rsvd3 = 0;
+		/* word4 relative_offset memcpy */
+		/* word5 r_ctl/df_ctl memcpy */
+		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_LENLOC_WORD12);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+		wqe->xmit_sequence.xmit_len = xmit_len;
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_BCAST64_CN:
+		/* word3 iocb=iotag32 wqe=seq_payload_len */
+		wqe->xmit_bcast64.seq_payload_len = xmit_len;
+		/* word4 iocb=rsvd wqe=rsvd */
+		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
+		break;
+	case CMD_FCP_IWRITE64_CR:
+		command_type = FCP_COMMAND_DATA_OUT;
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		bf_set(payload_offset_len, &wqe->fcp_iwrite,
+		       xmit_len + sizeof(struct fcp_rsp));
+		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+		       0);
+		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
+		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+		/* Always open the exchange */
+		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+		       LPFC_WQE_LENLOC_WORD4);
+		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+			if (iocbq->priority) {
+				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+				       (iocbq->priority << 1));
+			} else {
+				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
+		/* Note, word 10 is already initialized to 0 */
+
+		if (phba->fcp_embed_io) {
+			struct lpfc_scsi_buf *lpfc_cmd;
+			struct sli4_sge *sgl;
+			union lpfc_wqe128 *wqe128;
+			struct fcp_cmnd *fcp_cmnd;
+			uint32_t *ptr;
+
+			/* 128 byte wqe support here */
+			wqe128 = (union lpfc_wqe128 *)wqe;
+
+			lpfc_cmd = iocbq->context1;
+			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+			/* Word 0-2 - FCP_CMND */
+			wqe128->generic.bde.tus.f.bdeFlags =
+				BUFF_TYPE_BDE_IMMED;
+			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+			wqe128->generic.bde.addrHigh = 0;
+			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
+
+			bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
+
+			/* Word 22-29  FCP CMND Payload */
+			ptr = &wqe128->words[22];
+			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		}
+		break;
+	case CMD_FCP_IREAD64_CR:
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		bf_set(payload_offset_len, &wqe->fcp_iread,
+		       xmit_len + sizeof(struct fcp_rsp));
+		bf_set(cmd_buff_len, &wqe->fcp_iread,
+		       0);
+		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
+		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
+		/* Always open the exchange */
+		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+		       LPFC_WQE_LENLOC_WORD4);
+		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+			if (iocbq->priority) {
+				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+				       (iocbq->priority << 1));
+			} else {
+				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
+		/* Note, word 10 is already initialized to 0 */
+
+		if (phba->fcp_embed_io) {
+			struct lpfc_scsi_buf *lpfc_cmd;
+			struct sli4_sge *sgl;
+			union lpfc_wqe128 *wqe128;
+			struct fcp_cmnd *fcp_cmnd;
+			uint32_t *ptr;
+
+			/* 128 byte wqe support here */
+			wqe128 = (union lpfc_wqe128 *)wqe;
+
+			lpfc_cmd = iocbq->context1;
+			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+			/* Word 0-2 - FCP_CMND */
+			wqe128->generic.bde.tus.f.bdeFlags =
+				BUFF_TYPE_BDE_IMMED;
+			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+			wqe128->generic.bde.addrHigh = 0;
+			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
+
+			bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
+
+			/* Word 22-29  FCP CMND Payload */
+			ptr = &wqe128->words[22];
+			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		}
+		break;
+	case CMD_FCP_ICMND64_CR:
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		bf_set(payload_offset_len, &wqe->fcp_icmd,
+		       xmit_len + sizeof(struct fcp_rsp));
+		bf_set(cmd_buff_len, &wqe->fcp_icmd,
+		       0);
+		/* word3 iocb=IO_TAG wqe=reserved */
+		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+		/* Always open the exchange */
+		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		if (iocbq->iocb_flag & LPFC_IO_OAS) {
+			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+			if (iocbq->priority) {
+				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+				       (iocbq->priority << 1));
+			} else {
+				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+				       (phba->cfg_XLanePriority << 1));
+			}
+		}
+		/* Note, word 10 is already initialized to 0 */
+
+		if (phba->fcp_embed_io) {
+			struct lpfc_scsi_buf *lpfc_cmd;
+			struct sli4_sge *sgl;
+			union lpfc_wqe128 *wqe128;
+			struct fcp_cmnd *fcp_cmnd;
+			uint32_t *ptr;
+
+			/* 128 byte wqe support here */
+			wqe128 = (union lpfc_wqe128 *)wqe;
+
+			lpfc_cmd = iocbq->context1;
+			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+			fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+			/* Word 0-2 - FCP_CMND */
+			wqe128->generic.bde.tus.f.bdeFlags =
+				BUFF_TYPE_BDE_IMMED;
+			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+			wqe128->generic.bde.addrHigh = 0;
+			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
+
+			bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
+
+			/* Word 22-29  FCP CMND Payload */
+			ptr = &wqe128->words[22];
+			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+		}
+		break;
+	case CMD_GEN_REQUEST64_CR:
+		/* For this command calculate the xmit length of the
+		 * request bde.
+		 */
+		xmit_len = 0;
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+			sizeof(struct ulp_bde64);
+		for (i = 0; i < numBdes; i++) {
+			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+				break;
+			xmit_len += bde.tus.f.bdeSize;
+		}
+		/* word3 iocb=IO_TAG wqe=request_payload_len */
+		wqe->gen_req.request_payload_len = xmit_len;
+		/* word4 iocb=parameter wqe=relative_offset memcpy */
+		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
+		/* word6 context tag copied in memcpy */
+		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
+			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2015 Invalid CT %x command 0x%x\n",
+				ct, iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_ELS_RSP64_CX:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		/* words0-2 BDE memcpy */
+		/* word3 iocb=iotag32 wqe=response_payload_len */
+		wqe->xmit_els_rsp.response_payload_len = xmit_len;
+		/* word4 */
+		wqe->xmit_els_rsp.word4 = 0;
+		/* word5 iocb=rsvd wqe=did */
+		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+			 iocbq->iocb.un.xseq64.xmit_els_remoteID);
+
+		if_type = bf_get(lpfc_sli_intf_if_type,
+					&phba->sli4_hba.sli_intf);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			if (iocbq->vport->fc_flag & FC_PT2PT) {
+				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+					iocbq->vport->fc_myDID);
+				if (iocbq->vport->fc_myDID == Fabric_DID) {
+					bf_set(wqe_els_did,
+						&wqe->xmit_els_rsp.wqe_dest, 0);
+				}
+			}
+		}
+		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+		       iocbq->iocb.unsli3.rcvsli3.ox_id);
+		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+			       phba->vpi_ids[iocbq->vport->vpi]);
+		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+					iocbq->context2)->virt);
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+			       iocbq->vport->fc_myDID);
+			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+			       phba->vpi_ids[phba->pport->vpi]);
+		}
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_CLOSE_XRI_CN:
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+		/* words 0-2 memcpy should be 0 (reserved) */
+		/* port will send abts */
+		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
+		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
+			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
+			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
+		} else
+			fip = 0;
+
+		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
+			/*
+			 * The link is down, or the command was ELS_FIP
+			 * so the fw does not need to send abts
+			 * on the wire.
+			 */
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+		else
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+		wqe->abort_cmd.rsrvd5 = 0;
+		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		/*
+		 * The abort handler will send us CMD_ABORT_XRI_CN or
+		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+		 */
+		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		cmnd = CMD_ABORT_XRI_CX;
+		command_type = OTHER_COMMAND;
+		xritag = 0;
+		break;
+	case CMD_XMIT_BLS_RSP64_CX:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		/* The BLS ABTS RSP WQE is very different from other WQEs,
+		 * so we reconstruct it here from scratch based on the
+		 * information in the iocbq.
+		 */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+		/* OX_ID is the same no matter which side sent the ABTS
+		 * on the CT exchange.
+		 */
+		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
+		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
+		    LPFC_ABTS_UNSOL_INT) {
+			/* ABTS sent by initiator to CT exchange, the
+			 * RX_ID field will be filled with the newly
+			 * allocated responder XRI.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       iocbq->sli4_xritag);
+		} else {
+			/* ABTS sent by responder to CT exchange, the
+			 * RX_ID field will be filled with the responder
+			 * RX_ID from ABTS.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
+		}
+		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+
+		/* Use CT=VPI */
+		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
+			ndlp->nlp_DID);
+		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
+			iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+			phba->vpi_ids[phba->pport->vpi]);
+		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		/* Overwrite the pre-set command type with OTHER_COMMAND */
+		command_type = OTHER_COMMAND;
+		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+		}
+
+		break;
+	case CMD_SEND_FRAME:
+		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+		return 0;
+	case CMD_XRI_ABORTED_CX:
+	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2014 Invalid command 0x%x\n",
+				iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+	}
+
+	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
+		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
+		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
+		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
+			      LPFC_IO_DIF_INSERT);
+	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+	wqe->generic.wqe_com.abort_tag = abort_tag;
+	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+	return 0;
+}
+
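+/*
+ * Illustrative sketch, not driver code: the WQE words above are packed
+ * with bf_set(), which in this driver is a mask-and-shift accessor over
+ * 32-bit words. A minimal stand-alone model of the same idea, using a
+ * hypothetical field layout, could look like this:
+ *
+ *	#include <stdint.h>
+ *
+ *	#define EX_FIELD_SHIFT	3
+ *	#define EX_FIELD_MASK	0x1f
+ *
+ *	static inline void ex_bf_set(uint32_t *word, uint32_t val)
+ *	{
+ *		*word &= ~(EX_FIELD_MASK << EX_FIELD_SHIFT);
+ *		*word |= (val & EX_FIELD_MASK) << EX_FIELD_SHIFT;
+ *	}
+ *
+ * Each bf_set() call in the switch above updates one such field of a WQE
+ * word without disturbing the neighboring bits.
+ */
+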
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+			 struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_sglq *sglq;
+	union lpfc_wqe *wqe;
+	union lpfc_wqe128 wqe128;
+	struct lpfc_queue *wq;
+	struct lpfc_sli_ring *pring;
+
+	/* Get the WQ */
+	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
+			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
+		else
+			wq = phba->sli4_hba.oas_wq;
+	} else {
+		wq = phba->sli4_hba.els_wq;
+	}
+
+	/* Get corresponding ring */
+	pring = wq->pring;
+
+	/*
+	 * The WQE can be either 64 or 128 bytes,
+	 * so allocate space on the stack assuming the largest.
+	 */
+	wqe = (union lpfc_wqe *)&wqe128;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	if (piocb->sli4_xritag == NO_XRI) {
+		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			sglq = NULL;
+		else {
+			if (!list_empty(&pring->txq)) {
+				if (!(flag & SLI_IOCB_RET_IOCB)) {
+					__lpfc_sli_ringtx_put(phba,
+						pring, piocb);
+					return IOCB_SUCCESS;
+				} else {
+					return IOCB_BUSY;
+				}
+			} else {
+				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
+				if (!sglq) {
+					if (!(flag & SLI_IOCB_RET_IOCB)) {
+						__lpfc_sli_ringtx_put(phba,
+								pring,
+								piocb);
+						return IOCB_SUCCESS;
+					} else
+						return IOCB_BUSY;
+				}
+			}
+		}
+	} else if (piocb->iocb_flag & LPFC_IO_FCP)
+		/* These IO's already have an XRI and a mapped sgl. */
+		sglq = NULL;
+	else {
+		/*
+		 * This is a continuation of a command (CX), so this
+		 * sglq is on the active list.
+		 */
+		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
+		if (!sglq)
+			return IOCB_ERROR;
+	}
+
+	if (sglq) {
+		piocb->sli4_lxritag = sglq->sli4_lxritag;
+		piocb->sli4_xritag = sglq->sli4_xritag;
+		if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
+			return IOCB_ERROR;
+	}
+
+	if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
+		return IOCB_ERROR;
+
+	if (lpfc_sli4_wq_put(wq, wqe))
+		return IOCB_ERROR;
+	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * This routine wraps the actual lockless IOCB issue routine and invokes it
+ * through the function pointer stored in the lpfc_hba struct.
+ *
+ * Return codes:
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
+ **/
+int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1419 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+	}
+	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+	return 0;
+}
+
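+/*
+ * Illustrative sketch, not driver code: the jump table set up above lets
+ * callers stay SLI-revision agnostic; __lpfc_sli_issue_iocb() simply
+ * dispatches through the installed pointer. A stand-alone model of the
+ * pattern, using hypothetical names, might be:
+ *
+ *	struct ex_hba {
+ *		int (*issue)(struct ex_hba *hba, int tag);
+ *	};
+ *
+ *	static int ex_issue_s3(struct ex_hba *hba, int tag) { return tag; }
+ *	static int ex_issue_s4(struct ex_hba *hba, int tag) { return -tag; }
+ *
+ *	static void ex_setup(struct ex_hba *hba, int dev_grp)
+ *	{
+ *		hba->issue = (dev_grp == 1) ? ex_issue_s4 : ex_issue_s3;
+ *	}
+ *
+ * Callers then invoke hba->issue(hba, tag) without knowing which
+ * revision-specific routine is bound.
+ */
+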
+/**
+ * lpfc_sli4_calc_ring - Calculates which ring to use
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to command iocb.
+ *
+ * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
+ * hba_wqidx, thus we need to calculate the corresponding ring.
+ * Since ABORTS must go on the same WQ as the command they are
+ * aborting, we use the command's hba_wqidx.
+ */
+struct lpfc_sli_ring *
+lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+{
+	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+		if (!(phba->cfg_fof) ||
+		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+			if (unlikely(!phba->sli4_hba.fcp_wq))
+				return NULL;
+			/*
+			 * for abort iocb hba_wqidx should already
+			 * be setup based on what work queue we used.
+			 */
+			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+				piocb->hba_wqidx =
+					lpfc_sli4_scmd_to_wqidx_distr(phba,
+							      piocb->context1);
+				piocb->hba_wqidx = piocb->hba_wqidx %
+					phba->cfg_fcp_io_channel;
+			}
+			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
+		} else {
+			if (unlikely(!phba->sli4_hba.oas_wq))
+				return NULL;
+			piocb->hba_wqidx = 0;
+			return phba->sli4_hba.oas_wq->pring;
+		}
+	} else {
+		if (unlikely(!phba->sli4_hba.els_wq))
+			return NULL;
+		piocb->hba_wqidx = 0;
+		return phba->sli4_hba.els_wq->pring;
+	}
+}
+
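+/*
+ * Worked example, for illustration only: with cfg_fcp_io_channel == 4 and
+ * lpfc_sli4_scmd_to_wqidx_distr() returning a raw index of 9, the modulo
+ * above yields hba_wqidx = 9 % 4 = 1, i.e. fcp_wq[1]->pring. Aborts skip
+ * the recalculation so they stay on the WQ of the command they target.
+ */
+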
+/**
+ * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
+ * function. This function gets the hbalock and calls
+ * __lpfc_sli_issue_iocb function and will return the error returned
+ * by __lpfc_sli_issue_iocb function. This wrapper is used by
+ * functions which do not hold hbalock.
+ **/
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_hba_eq_hdl *hba_eq_hdl;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_queue *fpeq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflags;
+	int rc, idx;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		pring = lpfc_sli4_calc_ring(phba, piocb);
+		if (unlikely(pring == NULL))
+			return IOCB_ERROR;
+
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+		if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
+			idx = piocb->hba_wqidx;
+			hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
+
+			if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
+
+				/* Get associated EQ with this index */
+				fpeq = phba->sli4_hba.hba_eq[idx];
+
+				/* Turn off interrupts from this EQ */
+				lpfc_sli4_eq_clr_intr(fpeq);
+
+				/*
+				 * Process all the events on FCP EQ
+				 */
+				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+					lpfc_sli4_hba_handle_eqe(phba,
+						eqe, idx);
+					fpeq->EQ_processed++;
+				}
+
+				/* Always clear and re-arm the EQ */
+				lpfc_sli4_eq_release(fpeq,
+					LPFC_QUEUE_REARM);
+			}
+			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
+		}
+	} else {
+		/* For now, SLI2/3 will still use hbalock */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+	return rc;
+}
+
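+/*
+ * Illustrative sketch, not driver code: the look-ahead path above follows
+ * a common "mask, drain, re-arm" event queue shape. In generic form, with
+ * hypothetical types and helpers:
+ *
+ *	static void ex_drain_eq(struct ex_eq *eq)
+ *	{
+ *		struct ex_eqe *eqe;
+ *
+ *		ex_eq_clr_intr(eq);		// mask the EQ interrupt
+ *		while ((eqe = ex_eq_get(eq)))	// consume posted entries
+ *			ex_handle_eqe(eqe);
+ *		ex_eq_release(eq);		// clear and re-arm
+ *	}
+ *
+ * The hba_eq_in_use atomic above keeps this inline polling from racing
+ * the interrupt handler on the same EQ.
+ */
+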
+/**
+ * lpfc_extra_ring_setup - Extra ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while driver attaches with the
+ * HBA to setup the extra ring. The extra ring is used
+ * only when driver needs to support target mode functionality
+ * or IP over FC functionalities.
+ *
+ * This function is called with no lock held. SLI3 only.
+ **/
+static int
+lpfc_extra_ring_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+
+	psli = &phba->sli;
+
+	/* Adjust cmd/rsp ring iocb entries more evenly */
+
+	/* Take some away from the FCP ring */
+	pring = &psli->sli3_ring[LPFC_FCP_RING];
+	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+	/* and give them to the extra ring */
+	pring = &psli->sli3_ring[LPFC_EXTRA_RING];
+
+	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+	/* Setup default profile for this ring */
+	pring->iotag_max = 4096;
+	pring->num_mask = 1;
+	pring->prt[0].profile = 0;      /* Mask 0 */
+	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
+	pring->prt[0].type = phba->cfg_multi_ring_type;
+	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
+	return 0;
+}
+
+/**
+ * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port.  The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession.  The abort could be originated by the
+ * driver or by the port.  The ABTS could have been for an ELS
+ * or FCP IO.  The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+			  struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+	uint16_t rpi = 0, vpi = 0;
+	struct lpfc_vport *vport = NULL;
+
+	/* The rpi in the ulpContext is vport-sensitive. */
+	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+	rpi = iocbq->iocb.ulpContext;
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"3092 Port generated ABTS async event "
+			"on vpi %d rpi %d status 0x%x\n",
+			vpi, rpi, iocbq->iocb.ulpStatus);
+
+	vport = lpfc_find_vport_by_vpid(phba, vpi);
+	if (!vport)
+		goto err_exit;
+	ndlp = lpfc_findnode_rpi(vport, rpi);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		goto err_exit;
+
+	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+		lpfc_sli_abts_recover_port(vport, ndlp);
+	return;
+
+ err_exit:
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3095 Event Context not found, no "
+			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+			vpi, rpi, iocbq->iocb.ulpStatus,
+			iocbq->iocb.ulpContext);
+}
+
+/**
+ * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port.  The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply.  The abort could be originated
+ * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+			   struct lpfc_nodelist *ndlp,
+			   struct sli4_wcqe_xri_aborted *axri)
+{
+	struct lpfc_vport *vport;
+	uint32_t ext_status = 0;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3115 Node Context not found, driver "
+				"ignoring abts err event\n");
+		return;
+	}
+
+	vport = ndlp->vport;
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"3116 Port generated FCP XRI ABORT event on "
+			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
+			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+			bf_get(lpfc_wcqe_xa_xri, axri),
+			bf_get(lpfc_wcqe_xa_status, axri),
+			axri->parameter);
+
+	/*
+	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
+	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
+	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+	 */
+	ext_status = axri->parameter & IOERR_PARAM_MASK;
+	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
+		lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
+/**
+ * lpfc_sli_async_event_handler - ASYNC iocb handler function
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * This function is called by the slow ring event handler
+ * function when there is an ASYNC event iocb in the ring.
+ * This function is called with no lock held.
+ * Currently this function handles only temperature related
+ * ASYNC events. The function decodes the temperature sensor
+ * event message and posts events for the management applications.
+ **/
+static void
+lpfc_sli_async_event_handler(struct lpfc_hba *phba,
+	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
+{
+	IOCB_t *icmd;
+	uint16_t evt_code;
+	struct temp_event temp_event_data;
+	struct Scsi_Host *shost;
+	uint32_t *iocb_w;
+
+	icmd = &iocbq->iocb;
+	evt_code = icmd->un.asyncstat.evt_code;
+
+	switch (evt_code) {
+	case ASYNC_TEMP_WARN:
+	case ASYNC_TEMP_SAFE:
+		temp_event_data.data = (uint32_t) icmd->ulpContext;
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		if (evt_code == ASYNC_TEMP_WARN) {
+			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+				"0347 Adapter is very hot, please take "
+				"corrective action. temperature : %d Celsius\n",
+				(uint32_t) icmd->ulpContext);
+		} else {
+			temp_event_data.event_code = LPFC_NORMAL_TEMP;
+			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+				"0340 Adapter temperature is OK now. "
+				"temperature : %d Celsius\n",
+				(uint32_t) icmd->ulpContext);
+		}
+
+		/* Send temperature change event to applications */
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+			sizeof(temp_event_data), (char *) &temp_event_data,
+			LPFC_NL_VENDOR_ID);
+		break;
+	case ASYNC_STATUS_CN:
+		lpfc_sli_abts_err_handler(phba, iocbq);
+		break;
+	default:
+		iocb_w = (uint32_t *) icmd;
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0346 Ring %d handler: unexpected ASYNC_STATUS"
+			" evt_code 0x%x\n"
+			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
+			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
+			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
+			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
+			pring->ringno, icmd->un.asyncstat.evt_code,
+			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
+			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
+			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
+			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
+
+		break;
+	}
+}
+
+
+/**
+ * lpfc_sli4_setup - SLI ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli4_setup sets up rings of the SLI interface with
+ * number of iocbs per ring and iotags. This function is
+ * called while the driver attaches to the HBA and before the
+ * interrupts are enabled. So there is no need for locking.
+ *
+ * This function always returns 0.
+ **/
+int
+lpfc_sli4_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring *pring;
+
+	pring = phba->sli4_hba.els_wq->pring;
+	pring->num_mask = LPFC_MAX_RING_MASK;
+	pring->prt[0].profile = 0;	/* Mask 0 */
+	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+	pring->prt[0].type = FC_TYPE_ELS;
+	pring->prt[0].lpfc_sli_rcv_unsol_event =
+	    lpfc_els_unsol_event;
+	pring->prt[1].profile = 0;	/* Mask 1 */
+	pring->prt[1].rctl = FC_RCTL_ELS_REP;
+	pring->prt[1].type = FC_TYPE_ELS;
+	pring->prt[1].lpfc_sli_rcv_unsol_event =
+	    lpfc_els_unsol_event;
+	pring->prt[2].profile = 0;	/* Mask 2 */
+	/* NameServer Inquiry */
+	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
+	/* NameServer */
+	pring->prt[2].type = FC_TYPE_CT;
+	pring->prt[2].lpfc_sli_rcv_unsol_event =
+	    lpfc_ct_unsol_event;
+	pring->prt[3].profile = 0;	/* Mask 3 */
+	/* NameServer response */
+	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
+	/* NameServer */
+	pring->prt[3].type = FC_TYPE_CT;
+	pring->prt[3].lpfc_sli_rcv_unsol_event =
+	    lpfc_ct_unsol_event;
+	return 0;
+}
+
+/**
+ * lpfc_sli_setup - SLI ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_setup sets up rings of the SLI interface with
+ * number of iocbs per ring and iotags. This function is
+ * called while the driver attaches to the HBA and before the
+ * interrupts are enabled. So there is no need for locking.
+ *
+ * This function always returns 0. SLI3 only.
+ **/
+int
+lpfc_sli_setup(struct lpfc_hba *phba)
+{
+	int i, totiocbsize = 0;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
+	psli->sli_flag = 0;
+
+	psli->iocbq_lookup = NULL;
+	psli->iocbq_lookup_len = 0;
+	psli->last_iotag = 0;
+
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->sli3_ring[i];
+		switch (i) {
+		case LPFC_FCP_RING:	/* ring 0 - FCP */
+			/* numCiocb and numRiocb are used in config_port */
+			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+			pring->sli.sli3.numCiocb +=
+				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+			pring->sli.sli3.numRiocb +=
+				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+			pring->sli.sli3.numCiocb +=
+				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+			pring->sli.sli3.numRiocb +=
+				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+			pring->iotag_ctr = 0;
+			pring->iotag_max =
+			    (phba->cfg_hba_queue_depth * 2);
+			pring->fast_iotag = pring->iotag_max;
+			pring->num_mask = 0;
+			break;
+		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
+			/* numCiocb and numRiocb are used in config_port */
+			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+			pring->iotag_max = phba->cfg_hba_queue_depth;
+			pring->num_mask = 0;
+			break;
+		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
+			/* numCiocb and numRiocb are used in config_port */
+			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+			pring->fast_iotag = 0;
+			pring->iotag_ctr = 0;
+			pring->iotag_max = 4096;
+			pring->lpfc_sli_rcv_async_status =
+				lpfc_sli_async_event_handler;
+			pring->num_mask = LPFC_MAX_RING_MASK;
+			pring->prt[0].profile = 0;	/* Mask 0 */
+			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+			pring->prt[0].type = FC_TYPE_ELS;
+			pring->prt[0].lpfc_sli_rcv_unsol_event =
+			    lpfc_els_unsol_event;
+			pring->prt[1].profile = 0;	/* Mask 1 */
+			pring->prt[1].rctl = FC_RCTL_ELS_REP;
+			pring->prt[1].type = FC_TYPE_ELS;
+			pring->prt[1].lpfc_sli_rcv_unsol_event =
+			    lpfc_els_unsol_event;
+			pring->prt[2].profile = 0;	/* Mask 2 */
+			/* NameServer Inquiry */
+			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
+			/* NameServer */
+			pring->prt[2].type = FC_TYPE_CT;
+			pring->prt[2].lpfc_sli_rcv_unsol_event =
+			    lpfc_ct_unsol_event;
+			pring->prt[3].profile = 0;	/* Mask 3 */
+			/* NameServer response */
+			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
+			/* NameServer */
+			pring->prt[3].type = FC_TYPE_CT;
+			pring->prt[3].lpfc_sli_rcv_unsol_event =
+			    lpfc_ct_unsol_event;
+			break;
+		}
+		totiocbsize += (pring->sli.sli3.numCiocb *
+			pring->sli.sli3.sizeCiocb) +
+			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
+	}
+	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
+		/* Too many cmd / rsp ring entries in SLI2 SLIM */
+		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
+		       "SLI2 SLIM Data: x%x x%lx\n",
+		       phba->brd_no, totiocbsize,
+		       (unsigned long) MAX_SLIM_IOCB_SIZE);
+	}
+	if (phba->cfg_multi_ring_support == 2)
+		lpfc_extra_ring_setup(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_queue_init - Queue initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held.
+ **/
+void
+lpfc_sli4_queue_init(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	int i;
+
+	psli = &phba->sli;
+	spin_lock_irq(&phba->hbalock);
+	INIT_LIST_HEAD(&psli->mboxq);
+	INIT_LIST_HEAD(&psli->mboxq_cmpl);
+	/* Initialize list headers for txq and txcmplq as doubly linked lists */
+	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+		pring = phba->sli4_hba.fcp_wq[i]->pring;
+		pring->flag = 0;
+		pring->ringno = LPFC_FCP_RING;
+		INIT_LIST_HEAD(&pring->txq);
+		INIT_LIST_HEAD(&pring->txcmplq);
+		INIT_LIST_HEAD(&pring->iocb_continueq);
+		spin_lock_init(&pring->ring_lock);
+	}
+	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+		pring = phba->sli4_hba.nvme_wq[i]->pring;
+		pring->flag = 0;
+		pring->ringno = LPFC_FCP_RING;
+		INIT_LIST_HEAD(&pring->txq);
+		INIT_LIST_HEAD(&pring->txcmplq);
+		INIT_LIST_HEAD(&pring->iocb_continueq);
+		spin_lock_init(&pring->ring_lock);
+	}
+	pring = phba->sli4_hba.els_wq->pring;
+	pring->flag = 0;
+	pring->ringno = LPFC_ELS_RING;
+	INIT_LIST_HEAD(&pring->txq);
+	INIT_LIST_HEAD(&pring->txcmplq);
+	INIT_LIST_HEAD(&pring->iocb_continueq);
+	spin_lock_init(&pring->ring_lock);
+
+	if (phba->cfg_nvme_io_channel) {
+		pring = phba->sli4_hba.nvmels_wq->pring;
+		pring->flag = 0;
+		pring->ringno = LPFC_ELS_RING;
+		INIT_LIST_HEAD(&pring->txq);
+		INIT_LIST_HEAD(&pring->txcmplq);
+		INIT_LIST_HEAD(&pring->iocb_continueq);
+		spin_lock_init(&pring->ring_lock);
+	}
+
+	if (phba->cfg_fof) {
+		pring = phba->sli4_hba.oas_wq->pring;
+		pring->flag = 0;
+		pring->ringno = LPFC_FCP_RING;
+		INIT_LIST_HEAD(&pring->txq);
+		INIT_LIST_HEAD(&pring->txcmplq);
+		INIT_LIST_HEAD(&pring->iocb_continueq);
+		spin_lock_init(&pring->ring_lock);
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+}
+
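+/*
+ * Illustrative refactoring sketch, not driver code: the per-ring setup
+ * above repeats the same field initialization for the FCP, NVME, ELS,
+ * NVME-LS and OAS rings. A hypothetical helper factoring out the common
+ * body would be:
+ *
+ *	static void ex_init_pring(struct lpfc_sli_ring *pring, int ringno)
+ *	{
+ *		pring->flag = 0;
+ *		pring->ringno = ringno;
+ *		INIT_LIST_HEAD(&pring->txq);
+ *		INIT_LIST_HEAD(&pring->txcmplq);
+ *		INIT_LIST_HEAD(&pring->iocb_continueq);
+ *		spin_lock_init(&pring->ring_lock);
+ *	}
+ *
+ * Each block above would then collapse to one ex_init_pring() call with
+ * the appropriate ring number.
+ */
+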
+/**
+ * lpfc_sli_queue_init - Queue initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held.
+ **/
+void
+lpfc_sli_queue_init(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	int i;
+
+	psli = &phba->sli;
+	spin_lock_irq(&phba->hbalock);
+	INIT_LIST_HEAD(&psli->mboxq);
+	INIT_LIST_HEAD(&psli->mboxq_cmpl);
+	/* Initialize list headers for txq and txcmplq as doubly linked lists */
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->sli3_ring[i];
+		pring->ringno = i;
+		pring->sli.sli3.next_cmdidx  = 0;
+		pring->sli.sli3.local_getidx = 0;
+		pring->sli.sli3.cmdidx = 0;
+		INIT_LIST_HEAD(&pring->iocb_continueq);
+		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
+		INIT_LIST_HEAD(&pring->postbufq);
+		pring->flag = 0;
+		INIT_LIST_HEAD(&pring->txq);
+		INIT_LIST_HEAD(&pring->txcmplq);
+		spin_lock_init(&pring->ring_lock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command subsystem. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: pending mailbox command queue; the outstanding mailbox
+ * command; and completed mailbox command queue. It is the caller's
+ * responsibility to make sure that the driver is in the proper state to
+ * flush the mailbox command sub-system. Namely, the posting of mailbox
+ * commands into the pending mailbox command queue from the various clients
+ * must be stopped; either the HBA is in a state that it will never work on
+ * the outstanding mailbox command (such as in EEH or ERATT conditions) or
+ * the outstanding mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	unsigned long iflag;
+
+	/* Flush all the mailbox commands in the mbox system */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* The pending mailbox command queue */
+	list_splice_init(&phba->sli.mboxq, &completions);
+	/* The outstanding active mailbox command */
+	if (psli->mbox_active) {
+		list_add_tail(&psli->mbox_active->list, &completions);
+		psli->mbox_active = NULL;
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	}
+	/* The completed mailbox command queue */
+	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+	while (!list_empty(&completions)) {
+		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		if (pmb->mbox_cmpl)
+			pmb->mbox_cmpl(phba, pmb);
+	}
+}
+
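+/*
+ * Illustrative sketch, not driver code: the flush above uses the common
+ * "splice under lock, complete outside the lock" idiom so that no
+ * completion handler ever runs with hbalock held. In generic form, with
+ * hypothetical lock and queue names:
+ *
+ *	LIST_HEAD(tmp);
+ *
+ *	spin_lock_irqsave(&lock, flags);
+ *	list_splice_init(&pending, &tmp);	// steal the whole queue
+ *	spin_unlock_irqrestore(&lock, flags);
+ *	// walk tmp and invoke callbacks lock-free
+ *
+ * Setting MBX_NOT_FINISHED before each mbox_cmpl() call tells the owner
+ * the command was flushed rather than executed.
+ */
+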
+/**
+ * lpfc_sli_host_down - Vport cleanup function
+ * @vport: Pointer to virtual port object.
+ *
+ * lpfc_sli_host_down is called to clean up the resources
+ * associated with a vport before destroying virtual
+ * port data structures.
+ * This function does the following operations:
+ * - Free discovery resources associated with this virtual
+ *   port.
+ * - Free iocbs associated with this virtual port in
+ *   the txq.
+ * - Send abort for all iocb commands associated with this
+ *   vport in txcmplq.
+ *
+ * This function is called with no lock held and always returns 1.
+ **/
+int
+lpfc_sli_host_down(struct lpfc_vport *vport)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_queue *qp = NULL;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *iocb, *next_iocb;
+	int i;
+	unsigned long flags = 0;
+	uint16_t prev_pring_flag;
+
+	lpfc_cleanup_discovery_resources(vport);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+
+	/*
+	 * Error everything on the txq since these iocbs
+	 * have not been given to the FW yet.
+	 * Also issue ABTS for everything on the txcmplq
+	 */
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		for (i = 0; i < psli->num_rings; i++) {
+			pring = &psli->sli3_ring[i];
+			prev_pring_flag = pring->flag;
+			/* Only slow rings */
+			if (pring->ringno == LPFC_ELS_RING) {
+				pring->flag |= LPFC_DEFERRED_RING_EVENT;
+				/* Set the lpfc data pending flag */
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
+			}
+			list_for_each_entry_safe(iocb, next_iocb,
+						 &pring->txq, list) {
+				if (iocb->vport != vport)
+					continue;
+				list_move_tail(&iocb->list, &completions);
+			}
+			list_for_each_entry_safe(iocb, next_iocb,
+						 &pring->txcmplq, list) {
+				if (iocb->vport != vport)
+					continue;
+				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+			}
+			pring->flag = prev_pring_flag;
+		}
+	} else {
+		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+			pring = qp->pring;
+			if (!pring)
+				continue;
+			if (pring == phba->sli4_hba.els_wq->pring) {
+				pring->flag |= LPFC_DEFERRED_RING_EVENT;
+				/* Set the lpfc data pending flag */
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
+			}
+			prev_pring_flag = pring->flag;
+			spin_lock_irq(&pring->ring_lock);
+			list_for_each_entry_safe(iocb, next_iocb,
+						 &pring->txq, list) {
+				if (iocb->vport != vport)
+					continue;
+				list_move_tail(&iocb->list, &completions);
+			}
+			spin_unlock_irq(&pring->ring_lock);
+			list_for_each_entry_safe(iocb, next_iocb,
+						 &pring->txcmplq, list) {
+				if (iocb->vport != vport)
+					continue;
+				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+			}
+			pring->flag = prev_pring_flag;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_DOWN);
+	return 1;
+}
+
+/**
+ * lpfc_sli_hba_down - Resource cleanup function for the HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all iocb, buffers, mailbox commands
+ * while shutting down the HBA. This function is called with no
+ * lock held and always returns 1.
+ * This function does the following to cleanup driver resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry
+ *   in the list.
+ * - Free up any buffer posted to the HBA
+ * - Free mailbox commands in the mailbox queue.
+ **/
+int
+lpfc_sli_hba_down(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_queue *qp = NULL;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_dmabuf *buf_ptr;
+	unsigned long flags = 0;
+	int i;
+
+	/* Shutdown the mailbox command sub-system */
+	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
+
+	lpfc_hba_down_prep(phba);
+
+	lpfc_fabric_abort_hba(phba);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+
+	/*
+	 * Error everything on the txq since these iocbs
+	 * have not been given to the FW yet.
+	 */
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		for (i = 0; i < psli->num_rings; i++) {
+			pring = &psli->sli3_ring[i];
+			/* Only slow rings */
+			if (pring->ringno == LPFC_ELS_RING) {
+				pring->flag |= LPFC_DEFERRED_RING_EVENT;
+				/* Set the lpfc data pending flag */
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
+			}
+			list_splice_init(&pring->txq, &completions);
+		}
+	} else {
+		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+			pring = qp->pring;
+			if (!pring)
+				continue;
+			spin_lock_irq(&pring->ring_lock);
+			list_splice_init(&pring->txq, &completions);
+			spin_unlock_irq(&pring->ring_lock);
+			if (pring == phba->sli4_hba.els_wq->pring) {
+				pring->flag |= LPFC_DEFERRED_RING_EVENT;
+				/* Set the lpfc data pending flag */
+				set_bit(LPFC_DATA_READY, &phba->data_flags);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_DOWN);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_splice_init(&phba->elsbuf, &completions);
+	phba->elsbuf_cnt = 0;
+	phba->elsbuf_prev_cnt = 0;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	while (!list_empty(&completions)) {
+		list_remove_head(&completions, buf_ptr,
+			struct lpfc_dmabuf, list);
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+	}
+
+	/* Return any active mbox cmds */
+	del_timer_sync(&psli->mbox_tmo);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	return 1;
+}
+
+/**
+ * lpfc_sli_pcimem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
+ *
+ * This function is used for copying data between driver memory
+ * and the SLI memory. This function also changes the endianness
+ * of each word if native endianness is different from SLI
+ * endianness. This function can be called with or without
+ * lock.
+ **/
+void
+lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+	uint32_t *src = srcp;
+	uint32_t *dest = destp;
+	uint32_t ldata;
+	int i;
+
+	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
+		ldata = *src;
+		ldata = le32_to_cpu(ldata);
+		*dest = ldata;
+		src++;
+		dest++;
+	}
+}
+
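+/*
+ * Usage sketch, for illustration only: cnt is a byte count and the copy
+ * proceeds one 32-bit word at a time, so copying a mailbox-sized region
+ * out of little-endian SLI memory would look like:
+ *
+ *	uint32_t dst[MAILBOX_CMD_SIZE / sizeof(uint32_t)];
+ *
+ *	lpfc_sli_pcimem_bcopy(slim_va, dst, MAILBOX_CMD_SIZE);
+ *
+ * (slim_va and MAILBOX_CMD_SIZE are stand-ins for this sketch; only
+ * lpfc_sli_pcimem_bcopy() is taken from the code above.)
+ */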
+
+/**
+ * lpfc_sli_bemem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
+ *
+ * This function is used for copying data between a data structure
+ * with big endian representation to local endianness.
+ * This function can be called with or without lock.
+ **/
+void
+lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+	uint32_t *src = srcp;
+	uint32_t *dest = destp;
+	uint32_t ldata;
+	int i;
+
+	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+		ldata = *src;
+		ldata = be32_to_cpu(ldata);
+		*dest = ldata;
+		src++;
+		dest++;
+	}
+}
+
+/**
+ * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mp: Pointer to driver buffer object.
+ *
+ * This function is called with no lock held.
+ * It always returns zero after adding the buffer to the postbufq
+ * buffer list.
+ **/
+int
+lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 struct lpfc_dmabuf *mp)
+{
+	/* Stick struct lpfc_dmabuf at end of postbufq so driver can
+	 * look it up later.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	list_add_tail(&mp->list, &pring->postbufq);
+	pring->postbufq_cnt++;
+	spin_unlock_irq(&phba->hbalock);
+	return 0;
+}
+
+/**
+ * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
+ * @phba: Pointer to HBA context object.
+ *
+ * When HBQ is enabled, buffers are searched based on tags. This function
+ * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
+ * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
+ * does not conflict with tags of buffer posted for unsolicited events.
+ * The function returns the allocated tag. The function is called with
+ * no locks held.
+ **/
+uint32_t
+lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	phba->buffer_tag_count++;
+	/*
+	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
+	 * a tag assigned by HBQ.
+	 */
+	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
+	spin_unlock_irq(&phba->hbalock);
+	return phba->buffer_tag_count;
+}
+
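+/*
+ * Worked example, for illustration only: if buffer_tag_count increments
+ * to 5, the returned tag is (5 | QUE_BUFTAG_BIT). HBQ-assigned tags never
+ * have QUE_BUFTAG_BIT set, so a consumer can tell the two tag spaces
+ * apart with a simple mask test:
+ *
+ *	bool is_que_tag = tag & QUE_BUFTAG_BIT;	// hypothetical check
+ */
+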
+/**
+ * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: Buffer tag.
+ *
+ * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
+ * list. After the HBA DMAs data into these buffers, a CMD_IOCB_RET_XRI64_CX
+ * iocb is posted to the response ring with the tag of the buffer.
+ * This function searches the pring->postbufq list using the tag
+ * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
+ * iocb. If the buffer is found then the lpfc_dmabuf object of the
+ * buffer is returned to the caller, else NULL is returned.
+ * This function is called with no lock held.
+ **/
+struct lpfc_dmabuf *
+lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			uint32_t tag)
+{
+	struct lpfc_dmabuf *mp, *next_mp;
+	struct list_head *slp = &pring->postbufq;
+
+	/* Search postbufq, from the beginning, looking for a match on tag */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+		if (mp->buffer_tag == tag) {
+			list_del_init(&mp->list);
+			pring->postbufq_cnt--;
+			spin_unlock_irq(&phba->hbalock);
+			return mp;
+		}
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0402 Cannot find virtual addr for buffer tag on "
+			"ring %d Data x%lx x%p x%p x%x\n",
+			pring->ringno, (unsigned long) tag,
+			slp->next, slp->prev, pring->postbufq_cnt);
+
+	return NULL;
+}
+
+/**
+ * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @phys: DMA address of the buffer.
+ *
+ * This function searches the buffer list using the dma_address
+ * of unsolicited event to find the driver's lpfc_dmabuf object
+ * corresponding to the dma_address. The function returns the
+ * lpfc_dmabuf object if a buffer is found else it returns NULL.
+ * This function is called by the ct and els unsolicited event
+ * handlers to get the buffer associated with the unsolicited
+ * event.
+ *
+ * This function is called with no lock held.
+ **/
+struct lpfc_dmabuf *
+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 dma_addr_t phys)
+{
+	struct lpfc_dmabuf *mp, *next_mp;
+	struct list_head *slp = &pring->postbufq;
+
+	/* Search postbufq, from the beginning, looking for a match on phys */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+		if (mp->phys == phys) {
+			list_del_init(&mp->list);
+			pring->postbufq_cnt--;
+			spin_unlock_irq(&phba->hbalock);
+			return mp;
+		}
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0410 Cannot find virtual addr for mapped buf on "
+			"ring %d Data x%llx x%p x%p x%x\n",
+			pring->ringno, (unsigned long long)phys,
+			slp->next, slp->prev, pring->postbufq_cnt);
+	return NULL;
+}
+
+/**
+ * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * This function is the completion handler for the abort iocbs for
+ * ELS commands. This function is called from the ELS ring event
+ * handler with no lock held. This function frees memory resources
+ * associated with the abort iocb.
+ **/
+static void
+lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	uint16_t abort_iotag, abort_context;
+	struct lpfc_iocbq *abort_iocb = NULL;
+
+	if (irsp->ulpStatus) {
+
+		/*
+		 * Assume that the port already completed and returned, or
+		 * will return the iocb. Just Log the message.
+		 */
+		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
+		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
+
+		spin_lock_irq(&phba->hbalock);
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (abort_iotag != 0 &&
+				abort_iotag <= phba->sli.last_iotag)
+				abort_iocb =
+					phba->sli.iocbq_lookup[abort_iotag];
+		} else
+			/* For sli4 the abort_tag is the XRI,
+			 * so the abort routine puts the iotag of the iocb
+			 * being aborted in the context field of the abort
+			 * IOCB.
+			 */
+			abort_iocb = phba->sli.iocbq_lookup[abort_context];
+
+		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+				"0327 Cannot abort els iocb %p "
+				"with tag %x context %x, abort status %x, "
+				"abort code %x\n",
+				abort_iocb, abort_iotag, abort_context,
+				irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+		spin_unlock_irq(&phba->hbalock);
+	}
+	lpfc_sli_release_iocbq(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for ELS commands
+ * which are aborted. The function frees memory resources used for
+ * the aborted ELS commands.
+ **/
+static void
+lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		     struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+
+	/* ELS cmd tag <ulpIoTag> completes */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0139 Ignoring ELS cmd tag x%x completion Data: "
+			"x%x x%x x%x\n",
+			irsp->ulpIoTag, irsp->ulpStatus,
+			irsp->un.ulpWord[4], irsp->ulpTimeout);
+	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+		lpfc_ct_free_iocb(phba, cmdiocb);
+	else
+		lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort
+ * request, this function issues the abort unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
+ **/
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			   struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_iocbq *abtsiocbp;
+	IOCB_t *icmd = NULL;
+	IOCB_t *iabt = NULL;
+	int retval;
+	unsigned long iflags;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	/*
+	 * There are certain command types we don't want to abort.  And we
+	 * don't want to abort commands that are already in the process of
+	 * being aborted.
+	 */
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+		return 0;
+
+	/* issue ABTS for this IOCB based on iotag */
+	abtsiocbp = __lpfc_sli_get_iocbq(phba);
+	if (abtsiocbp == NULL)
+		return 0;
+
+	/* This signals the response to set the correct status
+	 * before calling the completion handler
+	 */
+	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+	iabt = &abtsiocbp->iocb;
+	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
+	iabt->un.acxri.abortContextTag = icmd->ulpContext;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+	} else {
+		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+	}
+	iabt->ulpLe = 1;
+	iabt->ulpClass = icmd->ulpClass;
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
+	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
+		abtsiocbp->iocb_flag |= LPFC_IO_FOF;
+
+	if (phba->link_state >= LPFC_LINK_UP)
+		iabt->ulpCommand = CMD_ABORT_XRI_CN;
+	else
+		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+
+	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+	abtsiocbp->vport = vport;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "0339 Abort xri x%x, original iotag x%x, "
+			 "abort cmd iotag x%x\n",
+			 iabt->un.acxri.abortIoTag,
+			 iabt->un.acxri.abortContextTag,
+			 abtsiocbp->iotag);
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
+		if (unlikely(pring == NULL))
+			return 0;
+		/* Note: both hbalock and ring_lock need to be set here */
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+			abtsiocbp, 0);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+	} else {
+		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
+			abtsiocbp, 0);
+	}
+
+	if (retval)
+		__lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+	/*
+	 * Caller to this routine should check for IOCB_ERROR
+	 * and handle it properly.  This routine no longer removes
+	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
+	 */
+	return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the completion callback of those commands is changed
+ * so that nothing happens when they finish. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			   struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	int retval = IOCB_ERROR;
+	IOCB_t *icmd = NULL;
+
+	lockdep_assert_held(&phba->hbalock);
+
+	/*
+	 * There are certain command types we don't want to abort.  And we
+	 * don't want to abort commands that are already in the process of
+	 * being aborted.
+	 */
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+		return 0;
+
+	if (!pring) {
+		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+		else
+			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+		goto abort_iotag_exit;
+	}
+
+	/*
+	 * If we're unloading, don't abort iocb on the ELS ring, but change
+	 * the callback so that nothing happens when it finishes.
+	 */
+	if ((vport->load_flag & FC_UNLOADING) &&
+	    (pring->ringno == LPFC_ELS_RING)) {
+		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+		else
+			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+		goto abort_iotag_exit;
+	}
+
+	/* Now, we try to issue the abort to the cmdiocb out */
+	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
+abort_iotag_exit:
+	/*
+	 * Caller to this routine should check for IOCB_ERROR
+	 * and handle it properly.  This routine no longer removes
+	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
+	 */
+	return retval;
+}
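+
+/*
+ * Usage sketch (illustrative only, helper name hypothetical): callers hold
+ * hbalock across the call and treat IOCB_ERROR as "abort not issued"; the
+ * aborted iocb still completes through its own completion handler.
+ *
+ *	spin_lock_irq(&phba->hbalock);
+ *	ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ *	spin_unlock_irq(&phba->hbalock);
+ *	if (ret == IOCB_ERROR)
+ *		handle_failed_abort(cmdiocb);
+ */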
+
+/**
+ * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort
+ * request, this function issues the abort unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
+ **/
+static int
+lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_iocbq *abtsiocbp;
+	union lpfc_wqe *abts_wqe;
+	int retval;
+
+	/*
+	 * There are certain command types we don't want to abort.  And we
+	 * don't want to abort commands that are already in the process of
+	 * being aborted.
+	 */
+	if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+	    cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
+	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+		return 0;
+
+	/* issue ABTS for this io based on iotag */
+	abtsiocbp = __lpfc_sli_get_iocbq(phba);
+	if (abtsiocbp == NULL)
+		return 0;
+
+	/* This signals the response to set the correct status
+	 * before calling the completion handler
+	 */
+	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+	/* Complete prepping the abort wqe and issue to the FW. */
+	abts_wqe = &abtsiocbp->wqe;
+
+	/* Clear any stale WQE contents */
+	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
+
+	/* word 7 */
+	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
+	       cmdiocb->iocb.ulpClass);
+
+	/* word 8 - tell the FW to abort the IO associated with this
+	 * outstanding exchange ID.
+	 */
+	abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
+
+	/* word 9 - this is the iotag for the abts_wqe completion. */
+	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
+	       abtsiocbp->iotag);
+
+	/* word 10 */
+	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+	/* word 11 */
+	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
+	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocbp->iocb_flag |= LPFC_IO_NVME;
+	abtsiocbp->vport = vport;
+	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
+	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
+	if (retval) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+				 "6147 Failed abts issue_wqe with status x%x "
+				 "for oxid x%x\n",
+				 retval, cmdiocb->sli4_xritag);
+		lpfc_sli_release_iocbq(phba, abtsiocbp);
+		return retval;
+	}
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+			 "6148 Drv Abort NVME Request Issued for "
+			 "ox_id x%x on reqtag x%x\n",
+			 cmdiocb->sli4_xritag,
+			 abtsiocbp->iotag);
+
+	return retval;
+}
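+
+/*
+ * Note (annotation): a non-zero return from the routine above means the
+ * ABTS WQE could not be posted. Once posted, the ABTS itself completes
+ * through lpfc_nvme_abort_fcreq_cmpl(), while the aborted command carries
+ * LPFC_DRIVER_ABORTED so its completion reports the correct status.
+ */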
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_queue *qp = NULL;
+	int i;
+
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		for (i = 0; i < psli->num_rings; i++) {
+			pring = &psli->sli3_ring[i];
+			lpfc_sli_abort_iocb_ring(phba, pring);
+		}
+		return;
+	}
+	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+		pring = qp->pring;
+		if (!pring)
+			continue;
+		lpfc_sli_abort_iocb_ring(phba, pring);
+	}
+}
+
+/**
+ * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
+ * @iocbq: Pointer to driver iocb object.
+ * @vport: Pointer to driver virtual port object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
+ *
+ * This function acts as an iocb filter for functions which abort or count
+ * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * 0 if the filtering criteria are met for the given iocb and will return
+ * 1 if they are not.
+ * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
+ * given iocb is for the SCSI device specified by vport, tgt_id and
+ * lun_id parameter.
+ * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
+ * given iocb is for the SCSI target specified by vport and tgt_id
+ * parameters.
+ * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
+ * given iocb is for the SCSI host associated with the given vport.
+ * This function is called with no locks held.
+ **/
+static int
+lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
+			   uint16_t tgt_id, uint64_t lun_id,
+			   lpfc_ctx_cmd ctx_cmd)
+{
+	struct lpfc_scsi_buf *lpfc_cmd;
+	int rc = 1;
+
+	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
+		return rc;
+
+	if (iocbq->vport != vport)
+		return rc;
+
+	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+
+	if (lpfc_cmd->pCmd == NULL)
+		return rc;
+
+	switch (ctx_cmd) {
+	case LPFC_CTX_LUN:
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
+			rc = 0;
+		break;
+	case LPFC_CTX_TGT:
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
+			rc = 0;
+		break;
+	case LPFC_CTX_HOST:
+		rc = 0;
+		break;
+	default:
+		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
+			__func__, ctx_cmd);
+		break;
+	}
+
+	return rc;
+}
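+
+/*
+ * Example (illustrative): with ctx_cmd == LPFC_CTX_LUN, an iocb passes the
+ * filter (return value 0) only when all of the following hold:
+ *
+ *	iocbq->iocb_flag & LPFC_IO_FCP
+ *	iocbq->vport == vport
+ *	lpfc_cmd->pCmd != NULL and lpfc_cmd->rdata->pnode != NULL
+ *	lpfc_cmd->rdata->pnode->nlp_sid == tgt_id
+ *	scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id
+ */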
+
+/**
+ * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
+ * @vport: Pointer to virtual port.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function returns the number of FCP commands pending for the vport.
+ * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
+ * commands pending on the vport associated with SCSI device specified
+ * by tgt_id and lun_id parameters.
+ * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
+ * commands pending on the vport associated with SCSI target specified
+ * by tgt_id parameter.
+ * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
+ * commands pending on the vport.
+ * This function returns the number of iocbs which satisfy the filter.
+ * This function is called without any lock held.
+ **/
+int
+lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
+		  lpfc_ctx_cmd ctx_cmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *iocbq;
+	int sum, i;
+
+	spin_lock_irq(&phba->hbalock);
+	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+						ctx_cmd) == 0)
+			sum++;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return sum;
+}
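+
+/*
+ * Usage sketch (illustrative only): a LUN reset handler could poll this
+ * count until the outstanding commands drain, e.g.:
+ *
+ *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
+ *		msleep(10);
+ */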
+
+/**
+ * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @rspiocb: Pointer to response iocb object.
+ *
+ * This function is called when an aborted FCP iocb completes. This
+ * function is called by the ring event handler with no lock held.
+ * This function frees the iocb.
+ **/
+void
+lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3096 ABORT_XRI_CN completing on rpi x%x "
+			"original iotag x%x, abort cmd iotag x%x "
+			"status 0x%x, reason 0x%x\n",
+			cmdiocb->iocb.un.acxri.abortContextTag,
+			cmdiocb->iocb.un.acxri.abortIoTag,
+			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+			rspiocb->iocb.un.ulpWord[4]);
+	lpfc_sli_release_iocbq(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters.
+ * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns the number of iocbs it failed to abort.
+ * This function is called with no locks held.
+ **/
+int
+lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_iocbq *abtsiocb;
+	struct lpfc_sli_ring *pring_s4;
+	IOCB_t *cmd = NULL;
+	int errcnt = 0, ret_val = 0;
+	int i;
+
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+					       abort_cmd) != 0)
+			continue;
+
+		/*
+		 * If the iocbq is already being aborted, don't take a second
+		 * action, but do count it.
+		 */
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+			continue;
+
+		/* issue ABTS for this IOCB based on iotag */
+		abtsiocb = lpfc_sli_get_iocbq(phba);
+		if (abtsiocb == NULL) {
+			errcnt++;
+			continue;
+		}
+
+		/* indicate the IO is being aborted by the driver. */
+		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+		cmd = &iocbq->iocb;
+		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
+		else
+			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+		abtsiocb->iocb.ulpLe = 1;
+		abtsiocb->iocb.ulpClass = cmd->ulpClass;
+		abtsiocb->vport = vport;
+
+		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
+		if (iocbq->iocb_flag & LPFC_IO_FCP)
+			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+		if (iocbq->iocb_flag & LPFC_IO_FOF)
+			abtsiocb->iocb_flag |= LPFC_IO_FOF;
+
+		if (lpfc_is_link_up(phba))
+			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+		else
+			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+		/* Setup callback routine and issue the command. */
+		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
+			if (!pring_s4)
+				continue;
+			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+						      abtsiocb, 0);
+		} else
+			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+						      abtsiocb, 0);
+		if (ret_val == IOCB_ERROR) {
+			lpfc_sli_release_iocbq(phba, abtsiocb);
+			errcnt++;
+			continue;
+		}
+	}
+
+	return errcnt;
+}
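+
+/*
+ * Usage sketch (illustrative only): a host reset path could abort every
+ * FCP iocb on the vport and check how many aborts could not be issued:
+ *
+ *	cnt = lpfc_sli_abort_iocb(vport, pring, 0, 0, LPFC_CTX_HOST);
+ *	if (cnt)
+ *		...cnt iocbs could not be aborted, escalate recovery...
+ */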
+
+/**
+ * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters.
+ * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns the number of iocbs it aborted.
+ * This function is called with no locks held right after a taskmgmt
+ * command is sent.
+ **/
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_iocbq *abtsiocbq;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_iocbq *iocbq;
+	IOCB_t *icmd;
+	int sum, i, ret_val;
+	unsigned long iflags;
+	struct lpfc_sli_ring *pring_s4;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* all I/Os are in the process of being flushed */
+	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+	sum = 0;
+
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+					       cmd) != 0)
+			continue;
+
+		/*
+		 * If the iocbq is already being aborted, don't take a second
+		 * action, but do count it.
+		 */
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+			continue;
+
+		/* issue ABTS for this IOCB based on iotag */
+		abtsiocbq = __lpfc_sli_get_iocbq(phba);
+		if (abtsiocbq == NULL)
+			continue;
+
+		icmd = &iocbq->iocb;
+		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			abtsiocbq->iocb.un.acxri.abortIoTag =
+							 iocbq->sli4_xritag;
+		else
+			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
+		abtsiocbq->iocb.ulpLe = 1;
+		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
+		abtsiocbq->vport = vport;
+
+		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
+		if (iocbq->iocb_flag & LPFC_IO_FCP)
+			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+		if (iocbq->iocb_flag & LPFC_IO_FOF)
+			abtsiocbq->iocb_flag |= LPFC_IO_FOF;
+
+		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		ndlp = lpfc_cmd->rdata->pnode;
+
+		if (lpfc_is_link_up(phba) &&
+		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+		else
+			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+		/* Setup callback routine and issue the command. */
+		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+
+		/*
+		 * Indicate the IO is being aborted by the driver and set
+		 * the caller's flag into the aborted IO.
+		 */
+		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
+			if (pring_s4 == NULL)
+				continue;
+			/* Note: both hbalock and ring_lock must be held here */
+			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+							abtsiocbq, 0);
+			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+		} else {
+			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
+							abtsiocbq, 0);
+		}
+
+		if (ret_val == IOCB_ERROR)
+			__lpfc_sli_release_iocbq(phba, abtsiocbq);
+		else
+			sum++;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return sum;
+}
+
+/**
+ * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_sli_issue_iocb_wait function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function can also be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	wait_queue_head_t *pdone_q;
+	unsigned long iflags;
+	struct lpfc_scsi_buf *lpfc_cmd;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+		/*
+		 * A time out has occurred for the iocb.  If a time out
+		 * completion handler has been supplied, call it.  Otherwise,
+		 * just free the iocbq.
+		 */
+
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+		cmdiocbq->wait_iocb_cmpl = NULL;
+		if (cmdiocbq->iocb_cmpl)
+			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+		else
+			lpfc_sli_release_iocbq(phba, cmdiocbq);
+		return;
+	}
+
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+
+	/* Set the exchange busy flag for task management commands */
+	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+			cur_iocbq);
+		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+	}
+
+	pdone_q = cmdiocbq->context_un.wait_queue;
+	if (pdone_q)
+		wake_up(pdone_q);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return;
+}
+
+/**
+ * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @flag: Flag to test.
+ *
+ * This routine grabs the hbalock and then tests the iocb_flag to
+ * see if the passed-in flag is set.
+ * Returns:
+ * 1 if flag is set.
+ * 0 if flag is not set.
+ **/
+static int
+lpfc_chk_iocb_flg(struct lpfc_hba *phba,
+		 struct lpfc_iocbq *piocbq, uint32_t flag)
+{
+	unsigned long iflags;
+	int ret;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ret = piocbq->iocb_flag & flag;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
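+
+/*
+ * Note (annotation): taking hbalock here makes this flag test safe to use
+ * as the wait_event_timeout() condition below; the completion side sets
+ * LPFC_IO_WAKE under the same lock, so a woken waiter cannot observe a
+ * stale iocb_flag value.
+ */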
+
+/**
+ * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to sli ring.
+ * @piocb: Pointer to command iocb.
+ * @prspiocbq: Pointer to response iocb.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the iocb to firmware and waits for the
+ * iocb to complete. The iocb_cmpl field of the iocb shall be used
+ * to handle iocbs which time out. If the field is NULL, the
+ * function shall free the iocbq structure.  If more clean up is
+ * needed, the caller is expected to provide a completion function
+ * that will provide the needed clean up.  If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
+ * The function waits for the iocb completion using a
+ * non-interruptible wait.
+ * This function will sleep while waiting for iocb completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. Due to the same reason, this function
+ * cannot be called with interrupt disabled.
+ * This function assumes that the iocb completions occur while
+ * this function sleeps. So, this function cannot be called from
+ * the thread which processes iocb completions for this ring.
+ * This function clears the iocb_flag of the iocb object before
+ * issuing the iocb and the iocb completion handler sets this
+ * flag and wakes this thread when the iocb completes.
+ * The contents of the response iocb will be copied to prspiocbq
+ * by the completion handler when the command completes.
+ * This function returns IOCB_SUCCESS when success.
+ * This function is called with no lock held.
+ **/
+int
+lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
+			 uint32_t ring_number,
+			 struct lpfc_iocbq *piocb,
+			 struct lpfc_iocbq *prspiocbq,
+			 uint32_t timeout)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+	long timeleft, timeout_req = 0;
+	int retval = IOCB_SUCCESS;
+	uint32_t creg_val;
+	struct lpfc_iocbq *iocb;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
+	struct lpfc_sli_ring *pring;
+	unsigned long iflags;
+	bool iocb_completed = true;
+
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		pring = lpfc_sli4_calc_ring(phba, piocb);
+	else
+		pring = &phba->sli.sli3_ring[ring_number];
+	/*
+	 * If the caller has provided a response iocbq buffer, then context2
+	 * must be NULL or it's an error.
+	 */
+	if (prspiocbq) {
+		if (piocb->context2)
+			return IOCB_ERROR;
+		piocb->context2 = prspiocbq;
+	}
+
+	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
+	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+	piocb->context_un.wait_queue = &done_q;
+	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val))
+			return IOCB_ERROR;
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+				     SLI_IOCB_RET_IOCB);
+	if (retval == IOCB_SUCCESS) {
+		timeout_req = msecs_to_jiffies(timeout * 1000);
+		timeleft = wait_event_timeout(done_q,
+				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
+				timeout_req);
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+
+			/*
+			 * IOCB timed out.  Inform the wake iocb wait
+			 * completion function and set local status
+			 */
+
+			iocb_completed = false;
+			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		if (iocb_completed) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"0331 IOCB wake signaled\n");
+			/* Note: we are not indicating if the IOCB has a success
+			 * status or not - that's for the caller to check.
+			 * IOCB_SUCCESS means just that the command was sent and
+			 * completed. Not that it completed successfully.
+			 */
+		} else if (timeleft == 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0338 IOCB wait timeout error - no "
+					"wake response Data x%x\n", timeout);
+			retval = IOCB_TIMEDOUT;
+		} else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0330 IOCB wake NOT set, "
+					"Data x%x x%lx\n",
+					timeout, (timeleft / jiffies));
+			retval = IOCB_TIMEDOUT;
+		}
+	} else if (retval == IOCB_BUSY) {
+		if (phba->cfg_log_verbose & LOG_SLI) {
+			list_for_each_entry(iocb, &pring->txq, list) {
+				txq_cnt++;
+			}
+			list_for_each_entry(iocb, &pring->txcmplq, list) {
+				txcmplq_cnt++;
+			}
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+		}
+		return retval;
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"0332 IOCB wait issue failed, Data x%x\n",
+				retval);
+		retval = IOCB_ERROR;
+	}
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val))
+			return IOCB_ERROR;
+		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	if (prspiocbq)
+		piocb->context2 = NULL;
+
+	piocb->context_un.wait_queue = NULL;
+	piocb->iocb_cmpl = NULL;
+	return retval;
+}
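+
+/*
+ * Usage sketch (illustrative only, variable names hypothetical): a
+ * synchronous caller supplies a response iocbq and a timeout in seconds:
+ *
+ *	rsp = lpfc_sli_get_iocbq(phba);
+ *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmd, rsp, 30);
+ *	if (rc == IOCB_TIMEDOUT)
+ *		return;  (the handler now owns the iocb resources)
+ *	status = rsp->iocb.ulpStatus;  (copied in by the wake handler)
+ */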
+
+/**
+ * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to driver mailbox object.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the mailbox to firmware and waits for the
+ * mailbox command to complete. If the mailbox command is not
+ * completed within timeout seconds, it returns MBX_TIMEOUT.
+ * The function waits for the mailbox completion using an
+ * interruptible wait. If the thread is woken up due to a
+ * signal, MBX_TIMEOUT error is returned to the caller. Caller
+ * should not free the mailbox resources, if this function returns
+ * MBX_TIMEOUT.
+ * This function will sleep while waiting for mailbox completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. Due to the same reason, this function
+ * cannot be called with interrupt disabled.
+ * This function assumes that the mailbox completion occurs while
+ * this function sleeps. So, this function cannot be called from
+ * the worker thread which processes mailbox completion.
+ * This function is called in the context of HBA management
+ * applications.
+ * This function returns MBX_SUCCESS when successful.
+ * This function is called with no lock held.
+ **/
+int
+lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
+			 uint32_t timeout)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+	MAILBOX_t *mb = NULL;
+	int retval;
+	unsigned long flag;
+
+	/* The caller might set context1 for extended buffer */
+	if (pmboxq->context1)
+		mb = (MAILBOX_t *)pmboxq->context1;
+
+	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
+	/* setup wake call as IOCB callback */
+	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+	/* setup context field to pass wait_queue pointer to wake function  */
+	pmboxq->context1 = &done_q;
+
+	/* now issue the command */
+	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
+		wait_event_interruptible_timeout(done_q,
+				pmboxq->mbox_flag & LPFC_MBX_WAKE,
+				msecs_to_jiffies(timeout * 1000));
+
+		spin_lock_irqsave(&phba->hbalock, flag);
+		/* restore the possible extended buffer for free resource */
+		pmboxq->context1 = (uint8_t *)mb;
+		/*
+		 * if the LPFC_MBX_WAKE flag is set, the mailbox has completed;
+		 * otherwise do not free the resources.
+		 */
+		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
+			retval = MBX_SUCCESS;
+		} else {
+			retval = MBX_TIMEOUT;
+			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flag);
+	} else {
+		/* restore the possible extended buffer for free resource */
+		pmboxq->context1 = (uint8_t *)mb;
+	}
+
+	return retval;
+}
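+
+/*
+ * Usage sketch (illustrative only): issue a mailbox command and wait,
+ * freeing the mailbox only when the command did not time out:
+ *
+ *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ *	if (rc != MBX_TIMEOUT)
+ *		mempool_free(pmboxq, phba->mbox_mem_pool);
+ */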
+
+/**
+ * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to shutdown the driver's mailbox sub-system.
+ * It first marks the mailbox sub-system as blocked to prevent the
+ * asynchronous mailbox command from being issued off the pending mailbox
+ * command queue. If the mailbox command sub-system shutdown is due to
+ * HBA error conditions such as EEH or ERATT, this routine shall invoke
+ * the mailbox sub-system flush routine to forcefully bring down the
+ * mailbox sub-system. Otherwise, if it is due to normal condition (such
+ * as with offline or HBA function reset), this routine will wait for the
+ * outstanding mailbox command to complete before invoking the mailbox
+ * sub-system flush routine to gracefully bring down mailbox sub-system.
+ **/
+void
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	unsigned long timeout;
+
+	if (mbx_action == LPFC_MBX_NO_WAIT) {
+		/* delay 100ms for port state */
+		msleep(100);
+		lpfc_sli_mbox_sys_flush(phba);
+		return;
+	}
+	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+
+	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+		/* Determine how long we might wait for the active mailbox
+		 * command to be gracefully completed by firmware.
+		 */
+		if (phba->sli.mbox_active)
+			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+						phba->sli.mbox_active) *
+						1000) + jiffies;
+		spin_unlock_irq(&phba->hbalock);
+
+		while (phba->sli.mbox_active) {
+			/* Check active mailbox complete status every 2ms */
+			msleep(2);
+			if (time_after(jiffies, timeout))
+				/* Timeout, let the mailbox flush routine
+				 * forcefully release the active mailbox
+				 * command
+				 */
+				break;
+		}
+	} else
+		spin_unlock_irq(&phba->hbalock);
+
+	lpfc_sli_mbox_sys_flush(phba);
+}
+
+/**
+ * lpfc_sli_eratt_read - read sli-3 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI3 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli_eratt_read(struct lpfc_hba *phba)
+{
+	uint32_t ha_copy;
+
+	/* Read chip Host Attention (HA) register */
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		goto unplug_err;
+
+	if (ha_copy & HA_ERATT) {
+		/* Read host status register to retrieve error event */
+		if (lpfc_sli_read_hs(phba))
+			goto unplug_err;
+
+		/* Check if a deferred error condition is active */
+		if ((HS_FFER1 & phba->work_hs) &&
+		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
+			phba->hba_flag |= DEFER_ERATT;
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr);
+		}
+
+		/* Set the driver HA work bitmap */
+		phba->work_ha |= HA_ERATT;
+		/* Indicate polling handles this ERATT */
+		phba->hba_flag |= HBA_ERATT_HANDLED;
+		return 1;
+	}
+	return 0;
+
+unplug_err:
+	/* Set the driver HS work bitmap */
+	phba->work_hs |= UNPLUG_ERR;
+	/* Set the driver HA work bitmap */
+	phba->work_ha |= HA_ERATT;
+	/* Indicate polling handles this ERATT */
+	phba->hba_flag |= HBA_ERATT_HANDLED;
+	return 1;
+}
+
+/**
+ * lpfc_sli4_eratt_read - read sli-4 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+	uint32_t uerr_sta_hi, uerr_sta_lo;
+	uint32_t if_type, portsmphr;
+	struct lpfc_register portstat_reg;
+
+	/*
+	 * For now, use the SLI4 device internal unrecoverable error
+	 * registers for error attention. This can be changed later.
+	 */
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+			&uerr_sta_lo) ||
+			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+			&uerr_sta_hi)) {
+			phba->work_hs |= UNPLUG_ERR;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1423 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"ue_mask_lo_reg=0x%x, "
+					"ue_mask_hi_reg=0x%x\n",
+					uerr_sta_lo, uerr_sta_hi,
+					phba->sli4_hba.ue_mask_lo,
+					phba->sli4_hba.ue_mask_hi);
+			phba->work_status[0] = uerr_sta_lo;
+			phba->work_status[1] = uerr_sta_hi;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+			&portstat_reg.word0) ||
+			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+			&portsmphr)){
+			phba->work_hs |= UNPLUG_ERR;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
+			phba->work_status[0] =
+				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] =
+				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2885 Port Status Event: "
+					"port status reg 0x%x, "
+					"port smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					portstat_reg.word0,
+					portsmphr,
+					phba->work_status[0],
+					phba->work_status[1]);
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2886 HBA Error Attention on unsupported "
+				"if type %d.", if_type);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_check_eratt - check error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called from timer soft interrupt context to check HBA's
+ * error attention register bit for error attention events.
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+int
+lpfc_sli_check_eratt(struct lpfc_hba *phba)
+{
+	uint32_t ha_copy;
+
+	/* If somebody is waiting to handle an eratt, don't process it
+	 * here. The brdkill function will do this.
+	 */
+	if (phba->link_flag & LS_IGNORE_ERATT)
+		return 0;
+
+	/* Check if interrupt handler handles this ERATT */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->hba_flag & HBA_ERATT_HANDLED) {
+		/* Interrupt handler has handled ERATT */
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+
+	/*
+	 * If there is deferred error attention, do not check for error
+	 * attention
+	 */
+	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+
+	/* If PCI channel is offline, don't process it */
+	if (unlikely(pci_channel_offline(phba->pcidev))) {
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+
+	switch (phba->sli_rev) {
+	case LPFC_SLI_REV2:
+	case LPFC_SLI_REV3:
+		/* Read chip Host Attention (HA) register */
+		ha_copy = lpfc_sli_eratt_read(phba);
+		break;
+	case LPFC_SLI_REV4:
+		/* Read device Unrecoverable Error (UERR) registers */
+		ha_copy = lpfc_sli4_eratt_read(phba);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0299 Invalid SLI revision (%d)\n",
+				phba->sli_rev);
+		ha_copy = 0;
+		break;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return ha_copy;
+}
+
+/**
+ * lpfc_intr_state_check - Check device state for interrupt handling
+ * @phba: Pointer to HBA context.
+ *
+ * This inline routine checks whether a device or its PCI slot is in a state
+ * in which the interrupt should be handled.
+ *
+ * This function returns 0 if the device or the PCI slot is in a state in
+ * which the interrupt should be handled, otherwise -EIO.
+ */
+static inline int
+lpfc_intr_state_check(struct lpfc_hba *phba)
+{
+	/* If the pci channel is offline, ignore all the interrupts */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -EIO;
+
+	/* Update device level interrupt statistics */
+	phba->sli.slistat.sli_intr++;
+
+	/* Ignore all interrupts during initialization. */
+	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		return -EIO;
+
+	return 0;
+}
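+
+/*
+ * Note (annotation): both SLI-3 interrupt handlers below run this check
+ * first when attached as MSI-X vectors, so an interrupt racing with PCI
+ * error recovery or early initialization is dropped with IRQ_NONE.
+ */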
+
+/**
+ * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The link attention and ELS ring attention events are
+ * handled by the worker thread. The interrupt handler signals the worker
+ * thread and returns for these events. This function is called without
+ * any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_sp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	uint32_t ha_copy, hc_copy;
+	uint32_t work_ha_copy;
+	unsigned long status;
+	unsigned long iflag;
+	uint32_t control;
+
+	MAILBOX_t *mbox, *pmbox;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	/*
+	 * Get the driver's phba structure from the dev_id and
+	 * assume the HBA is not interrupting.
+	 */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Stuff needs to be attended to when this function is invoked as an
+	 * individual interrupt handler in MSI-X multi-message interrupt mode
+	 */
+	if (phba->intr_type == MSIX) {
+		/* Check device state for handling interrupt */
+		if (lpfc_intr_state_check(phba))
+			return IRQ_NONE;
+		/* Need to read HA REG for slow-path events */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			goto unplug_error;
+		/* If somebody is waiting to handle an eratt don't process it
+		 * here. The brdkill function will do this.
+		 */
+		if (phba->link_flag & LS_IGNORE_ERATT)
+			ha_copy &= ~HA_ERATT;
+		/* Check the need for handling ERATT in interrupt handler */
+		if (ha_copy & HA_ERATT) {
+			if (phba->hba_flag & HBA_ERATT_HANDLED)
+				/* ERATT polling has handled ERATT */
+				ha_copy &= ~HA_ERATT;
+			else
+				/* Indicate interrupt handler handles ERATT */
+				phba->hba_flag |= HBA_ERATT_HANDLED;
+		}
+
+		/*
+		 * If there is deferred error attention, do not check for any
+		 * interrupt.
+		 */
+		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			return IRQ_NONE;
+		}
+
+		/* Clear up only attention source related to slow-path */
+		if (lpfc_readl(phba->HCregaddr, &hc_copy))
+			goto unplug_error;
+
+		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
+			HC_LAINT_ENA | HC_ERINT_ENA),
+			phba->HCregaddr);
+		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
+			phba->HAregaddr);
+		writel(hc_copy, phba->HCregaddr);
+		readl(phba->HAregaddr); /* flush */
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+	} else
+		ha_copy = phba->ha_copy;
+
+	work_ha_copy = ha_copy & phba->work_ha_mask;
+
+	if (work_ha_copy) {
+		if (work_ha_copy & HA_LATT) {
+			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
+				/*
+				 * Turn off Link Attention interrupts
+				 * until CLEAR_LA done
+				 */
+				spin_lock_irqsave(&phba->hbalock, iflag);
+				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
+				if (lpfc_readl(phba->HCregaddr, &control))
+					goto unplug_error;
+				control &= ~HC_LAINT_ENA;
+				writel(control, phba->HCregaddr);
+				readl(phba->HCregaddr); /* flush */
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+			}
+			else
+				work_ha_copy &= ~HA_LATT;
+		}
+
+		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
+			/*
+			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
+			 * the only slow ring.
+			 */
+			status = (work_ha_copy &
+				(HA_RXMASK  << (4*LPFC_ELS_RING)));
+			status >>= (4*LPFC_ELS_RING);
+			if (status & HA_RXMASK) {
+				spin_lock_irqsave(&phba->hbalock, iflag);
+				if (lpfc_readl(phba->HCregaddr, &control))
+					goto unplug_error;
+
+				lpfc_debugfs_slow_ring_trc(phba,
+				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
+				control, status,
+				(uint32_t)phba->sli.slistat.sli_intr);
+
+				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
+					lpfc_debugfs_slow_ring_trc(phba,
+						"ISR Disable ring:"
+						"pwork:x%x hawork:x%x wait:x%x",
+						phba->work_ha, work_ha_copy,
+						(uint32_t)((unsigned long)
+						&phba->work_waitq));
+
+					control &=
+					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
+					writel(control, phba->HCregaddr);
+					readl(phba->HCregaddr); /* flush */
+				}
+				else {
+					lpfc_debugfs_slow_ring_trc(phba,
+						"ISR slow ring:   pwork:"
+						"x%x hawork:x%x wait:x%x",
+						phba->work_ha, work_ha_copy,
+						(uint32_t)((unsigned long)
+						&phba->work_waitq));
+				}
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+			}
+		}
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (work_ha_copy & HA_ERATT) {
+			if (lpfc_sli_read_hs(phba))
+				goto unplug_error;
+			/*
+			 * Check if a deferred error condition is active
+			 */
+			if ((HS_FFER1 & phba->work_hs) &&
+				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
+				  phba->work_hs)) {
+				phba->hba_flag |= DEFER_ERATT;
+				/* Clear all interrupt enable conditions */
+				writel(0, phba->HCregaddr);
+				readl(phba->HCregaddr);
+			}
+		}
+
+		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
+			pmb = phba->sli.mbox_active;
+			pmbox = &pmb->u.mb;
+			mbox = phba->mbox;
+			vport = pmb->vport;
+
+			/* First check out the status word */
+			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
+			if (pmbox->mbxOwner != OWN_HOST) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				/*
+				 * Stray Mailbox Interrupt, mbxCommand <cmd>
+				 * mbxStatus <status>
+				 */
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI,
+						"(%d):0304 Stray Mailbox "
+						"Interrupt mbxCommand x%x "
+						"mbxStatus x%x\n",
+						(vport ? vport->vpi : 0),
+						pmbox->mbxCommand,
+						pmbox->mbxStatus);
+				/* clear mailbox attention bit */
+				work_ha_copy &= ~HA_MBATT;
+			} else {
+				phba->sli.mbox_active = NULL;
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				phba->last_completion_time = jiffies;
+				del_timer(&phba->sli.mbox_tmo);
+				if (pmb->mbox_cmpl) {
+					lpfc_sli_pcimem_bcopy(mbox, pmbox,
+							MAILBOX_CMD_SIZE);
+					if (pmb->out_ext_byte_len &&
+						pmb->context2)
+						lpfc_sli_pcimem_bcopy(
+						phba->mbox_ext,
+						pmb->context2,
+						pmb->out_ext_byte_len);
+				}
+				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+
+					lpfc_debugfs_disc_trc(vport,
+						LPFC_DISC_TRC_MBOX_VPORT,
+						"MBOX dflt rpi: : "
+						"status:x%x rpi:x%x",
+						(uint32_t)pmbox->mbxStatus,
+						pmbox->un.varWords[0], 0);
+
+					if (!pmbox->mbxStatus) {
+						mp = (struct lpfc_dmabuf *)
+							(pmb->context1);
+						ndlp = (struct lpfc_nodelist *)
+							pmb->context2;
+
+						/* Reg_LOGIN of dflt RPI was
+						 * successful. Now let's get
+						 * rid of the RPI using the
+						 * same mbox buffer.
+						 */
+						lpfc_unreg_login(phba,
+							vport->vpi,
+							pmbox->un.varWords[0],
+							pmb);
+						pmb->mbox_cmpl =
+							lpfc_mbx_cmpl_dflt_rpi;
+						pmb->context1 = mp;
+						pmb->context2 = ndlp;
+						pmb->vport = vport;
+						rc = lpfc_sli_issue_mbox(phba,
+								pmb,
+								MBX_NOWAIT);
+						if (rc != MBX_BUSY)
+							lpfc_printf_log(phba,
+							KERN_ERR,
+							LOG_MBOX | LOG_SLI,
+							"0350 rc should have"
+							"been MBX_BUSY\n");
+						if (rc != MBX_NOT_FINISHED)
+							goto send_current_mbox;
+					}
+				}
+				spin_lock_irqsave(
+						&phba->pport->work_port_lock,
+						iflag);
+				phba->pport->work_port_events &=
+					~WORKER_MBOX_TMO;
+				spin_unlock_irqrestore(
+						&phba->pport->work_port_lock,
+						iflag);
+				lpfc_mbox_cmpl_put(phba, pmb);
+			}
+		} else
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		if ((work_ha_copy & HA_MBATT) &&
+		    (phba->sli.mbox_active == NULL)) {
+send_current_mbox:
+			/* Process next mailbox command if there is one */
+			do {
+				rc = lpfc_sli_issue_mbox(phba, NULL,
+							 MBX_NOWAIT);
+			} while (rc == MBX_NOT_FINISHED);
+			if (rc != MBX_SUCCESS)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0349 rc should be "
+						"MBX_SUCCESS\n");
+		}
+
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		phba->work_ha |= work_ha_copy;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		lpfc_worker_wake_up(phba);
+	}
+	return IRQ_HANDLED;
+unplug_error:
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return IRQ_HANDLED;
+
+} /* lpfc_sli_sp_intr_handler */
+
+/**
+ * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_fp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	uint32_t ha_copy;
+	unsigned long status;
+	unsigned long iflag;
+	struct lpfc_sli_ring *pring;
+
+	/* Get the driver's phba structure from the dev_id and
+	 * assume the HBA is not interrupting.
+	 */
+	phba = (struct lpfc_hba *) dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Stuff needs to be attended to when this function is invoked as an
+	 * individual interrupt handler in MSI-X multi-message interrupt mode
+	 */
+	if (phba->intr_type == MSIX) {
+		/* Check device state for handling interrupt */
+		if (lpfc_intr_state_check(phba))
+			return IRQ_NONE;
+		/* Need to read HA REG for FCP ring and other ring events */
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return IRQ_HANDLED;
+		/* Clear up only attention source related to fast-path */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		/*
+		 * If there is deferred error attention, do not check for
+		 * any interrupt.
+		 */
+		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			return IRQ_NONE;
+		}
+		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
+			phba->HAregaddr);
+		readl(phba->HAregaddr); /* flush */
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+	} else
+		ha_copy = phba->ha_copy;
+
+	/*
+	 * Process all events on FCP ring. Take the optimized path for FCP IO.
+	 */
+	ha_copy &= ~(phba->work_ha_mask);
+
+	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+	status >>= (4*LPFC_FCP_RING);
+	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
+	if (status & HA_RXMASK)
+		lpfc_sli_handle_fast_ring_event(phba, pring, status);
+
+	if (phba->cfg_multi_ring_support == 2) {
+		/*
+		 * Process all events on extra ring. Take the optimized path
+		 * for extra ring IO.
+		 */
+		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+		status >>= (4*LPFC_EXTRA_RING);
+		if (status & HA_RXMASK) {
+			lpfc_sli_handle_fast_ring_event(phba,
+					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
+					status);
+		}
+	}
+	return IRQ_HANDLED;
+}  /* lpfc_sli_fp_intr_handler */
+
+/**
+ * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the HBA device-level interrupt handler to device with
+ * SLI-3 interface spec, called from the PCI layer when either MSI or
+ * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
+ * requires driver attention. This function invokes the slow-path interrupt
+ * attention handling function and fast-path interrupt attention handling
+ * function in turn to process the relevant HBA attention events. This
+ * function is called without any lock held. It gets the hbalock to access
+ * and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	irqreturn_t sp_irq_rc, fp_irq_rc;
+	unsigned long status1, status2;
+	uint32_t hc_copy;
+
+	/*
+	 * Get the driver's phba structure from the dev_id and
+	 * assume the HBA is not interrupting.
+	 */
+	phba = (struct lpfc_hba *) dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (lpfc_intr_state_check(phba))
+		return IRQ_NONE;
+
+	spin_lock(&phba->hbalock);
+	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_HANDLED;
+	}
+
+	if (unlikely(!phba->ha_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_NONE;
+	} else if (phba->ha_copy & HA_ERATT) {
+		if (phba->hba_flag & HBA_ERATT_HANDLED)
+			/* ERATT polling has handled ERATT */
+			phba->ha_copy &= ~HA_ERATT;
+		else
+			/* Indicate interrupt handler handles ERATT */
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+	}
+
+	/*
+	 * If there is deferred error attention, do not check for any interrupt.
+	 */
+	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_NONE;
+	}
+
+	/* Clear attention sources except link and error attentions */
+	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_HANDLED;
+	}
+	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
+		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
+		phba->HCregaddr);
+	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+	writel(hc_copy, phba->HCregaddr);
+	readl(phba->HAregaddr); /* flush */
+	spin_unlock(&phba->hbalock);
+
+	/*
+	 * Invokes slow-path host attention interrupt handling as appropriate.
+	 */
+
+	/* status of events with mailbox and link attention */
+	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
+
+	/* status of events with ELS ring */
+	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
+	status2 >>= (4*LPFC_ELS_RING);
+
+	if (status1 || (status2 & HA_RXMASK))
+		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
+	else
+		sp_irq_rc = IRQ_NONE;
+
+	/*
+	 * Invoke fast-path host attention interrupt handling as appropriate.
+	 */
+
+	/* status of events with FCP ring */
+	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+	status1 >>= (4*LPFC_FCP_RING);
+
+	/* status of events with extra ring */
+	if (phba->cfg_multi_ring_support == 2) {
+		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+		status2 >>= (4*LPFC_EXTRA_RING);
+	} else
+		status2 = 0;
+
+	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
+		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
+	else
+		fp_irq_rc = IRQ_NONE;
+
+	/* Return device-level interrupt handling status */
+	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
+}  /* lpfc_sli_intr_handler */
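+
+/*
+ * Registration sketch (illustrative only): in MSI or Pin-IRQ mode the
+ * driver wires this device-level handler up with request_irq(), e.g.:
+ *
+ *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+ *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ */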
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the fcp xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the fcp xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for FCP work queue */
+		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the nvme xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the nvme xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for NVME work queue */
+		if (phba->nvmet_support) {
+			lpfc_sli4_nvmet_xri_aborted(phba,
+						    &cq_event->cqe.wcqe_axri);
+		} else {
+			lpfc_sli4_nvme_xri_aborted(phba,
+						   &cq_event->cqe.wcqe_axri);
+		}
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the els xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the els xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for ELS work queue */
+		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure
+ * @pIocbIn: pointer to the rspiocbq
+ * @pIocbOut: pointer to the cmdiocbq
+ * @wcqe: pointer to the complete wcqe
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from command iocbq and transferring the
+ * completion status information from the complete wcqe.
+ **/
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+			      struct lpfc_iocbq *pIocbIn,
+			      struct lpfc_iocbq *pIocbOut,
+			      struct lpfc_wcqe_complete *wcqe)
+{
+	int numBdes, i;
+	unsigned long iflags;
+	uint32_t status, max_response;
+	struct lpfc_dmabuf *dmabuf;
+	struct ulp_bde64 *bpl, bde;
+	size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
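+	/*
+	 * Copy the command iocbq from its embedded IOCB onward into the
+	 * response iocbq; the fields in front of the IOCB (list head,
+	 * cq_event, etc.) of the response iocbq are deliberately preserved,
+	 * which is why the copy starts at offsetof(struct lpfc_iocbq, iocb).
+	 */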
+	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+	       sizeof(struct lpfc_iocbq) - offset);
+	/* Map WCQE parameters into irspiocb parameters */
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
+	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
+		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+			pIocbIn->iocb.un.fcpi.fcpi_parm =
+					pIocbOut->iocb.un.fcpi.fcpi_parm -
+					wcqe->total_data_placed;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+	else {
+		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+		switch (pIocbOut->iocb.ulpCommand) {
+		case CMD_ELS_REQUEST64_CR:
+			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+			bpl  = (struct ulp_bde64 *)dmabuf->virt;
+			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+			max_response = bde.tus.f.bdeSize;
+			break;
+		case CMD_GEN_REQUEST64_CR:
+			max_response = 0;
+			if (!pIocbOut->context3)
+				break;
+			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+					sizeof(struct ulp_bde64);
+			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+			bpl = (struct ulp_bde64 *)dmabuf->virt;
+			for (i = 0; i < numBdes; i++) {
+				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+					max_response += bde.tus.f.bdeSize;
+			}
+			break;
+		default:
+			max_response = wcqe->total_data_placed;
+			break;
+		}
+		if (max_response < wcqe->total_data_placed)
+			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+		else
+			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+				wcqe->total_data_placed;
+	}
+
+	/* Convert BG errors for completion status */
+	if (status == CQE_STATUS_DI_ERROR) {
+		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+
+		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
+
+		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
+		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_GUARD_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_APPTAG_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_REFTAG_ERR_MASK;
+
+		/* Check to see if there was any good data before the error */
+		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_HI_WATER_MARK_PRESENT_MASK;
+			pIocbIn->iocb.unsli3.sli3_bg.bghm =
+				wcqe->total_data_placed;
+		}
+
+		/*
+		 * Set ALL the error bits to indicate we don't know what
+		 * type of error it is.
+		 */
+		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+				BGS_GUARD_ERR_MASK);
+	}
+
+	/* Pick up HBA exchange busy condition */
+	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+}
+
+/**
+ * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * @phba: Pointer to HBA context object.
+ * @irspiocbq: Pointer to the response iocbq carrying the ELS WCQE.
+ *
+ * This routine handles an ELS work-queue completion event and constructs
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the receive IOCBQ, NULL otherwise.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
+			       struct lpfc_iocbq *irspiocbq)
+{
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_wcqe_complete *wcqe;
+	unsigned long iflags;
+
+	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return NULL;
+
+	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+	spin_lock_irqsave(&pring->ring_lock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	if (unlikely(!cmdiocbq)) {
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
+				wcqe->word0, wcqe->total_data_placed,
+				wcqe->parameter, wcqe->word3);
+		lpfc_sli_release_iocbq(phba, irspiocbq);
+		return NULL;
+	}
+
+	/* Put the iocb back on the txcmplq */
+	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+	/* Fake the irspiocbq and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+
+	return irspiocbq;
+}
+
+/**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry carrying an
+ * asynchronous event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0392 Async Event: word0:x%x, word1:x%x, "
+			"word2:x%x, word3:x%x\n", mcqe->word0,
+			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0394 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into an asynchronous event entry */
+	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+	/* Set the async event flag */
+	phba->hba_flag |= ASYNC_EVENT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry carrying a
+ * mailbox completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	uint32_t mcqe_status;
+	MAILBOX_t *mbox, *pmbox;
+	struct lpfc_mqe *mqe;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *mp;
+	unsigned long iflags;
+	LPFC_MBOXQ_t *pmb;
+	bool workposted = false;
+	int rc;
+
+	/* If not a mailbox completion MCQE, bail out after checking consume */
+	if (!bf_get(lpfc_trailer_completed, mcqe))
+		goto out_no_mqe_complete;
+
+	/* Get the reference to the active mbox command */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pmb = phba->sli.mbox_active;
+	if (unlikely(!pmb)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"1832 No pending MBOX command to handle\n");
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		goto out_no_mqe_complete;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	mqe = &pmb->u.mqe;
+	pmbox = (MAILBOX_t *)&pmb->u.mqe;
+	mbox = phba->mbox;
+	vport = pmb->vport;
+
+	/* Reset heartbeat timer */
+	phba->last_completion_time = jiffies;
+	del_timer(&phba->sli.mbox_tmo);
+
+	/* Move mbox data to caller's mailbox region, do endian swapping */
+	if (pmb->mbox_cmpl && mbox)
+		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+
+	/*
+	 * For mcqe errors, conditionally move a modified error code to
+	 * the mbox so that the error will not be missed.
+	 */
+	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
+			bf_set(lpfc_mqe_status, mqe,
+			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
+	}
+	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+				      "MBOX dflt rpi: status:x%x rpi:x%x",
+				      mcqe_status,
+				      pmbox->un.varWords[0], 0);
+		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+			mp = (struct lpfc_dmabuf *)(pmb->context1);
+			ndlp = (struct lpfc_nodelist *)pmb->context2;
+			/* Reg_LOGIN of dflt RPI was successful. Now let's get
+			 * rid of the RPI using the same mbox buffer.
+			 */
+			lpfc_unreg_login(phba, vport->vpi,
+					 pmbox->un.varWords[0], pmb);
+			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+			pmb->context1 = mp;
+			pmb->context2 = ndlp;
+			pmb->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+			if (rc != MBX_BUSY)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0385 rc should "
+						"have been MBX_BUSY\n");
+			if (rc != MBX_NOT_FINISHED)
+				goto send_current_mbox;
+		}
+	}
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	/* There is mailbox completion work to do */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_mbox_cmpl_put(phba, pmb);
+	phba->work_ha |= HA_MBATT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+send_current_mbox:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Release the mailbox command posting token */
+	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	/* Setting the active mailbox pointer must be in sync with flag clear */
+	phba->sli.mbox_active = NULL;
+	if (bf_get(lpfc_trailer_consumed, mcqe))
+		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	/* Wake up worker thread to post the next pending mailbox command */
+	lpfc_worker_wake_up(phba);
+	return workposted;
+
+out_no_mqe_complete:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (bf_get(lpfc_trailer_consumed, mcqe))
+		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return false;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry; it invokes the
+ * proper mailbox completion handling or asynchronous event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+	struct lpfc_mcqe mcqe;
+	bool workposted;
+
+	/* Copy the mailbox MCQE and convert endian order as needed */
+	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+	/* Invoke the proper event handling routine */
+	if (!bf_get(lpfc_trailer_async, &mcqe))
+		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+	else
+		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflags;
+	struct lpfc_sli_ring *pring = cq->pring;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
+	int fcp_txcmplq_cnt = 0;
+
+	/* Get an irspiocbq for later ELS response processing use */
+	irspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!irspiocbq) {
+		if (!list_empty(&pring->txq))
+			txq_cnt++;
+		if (!list_empty(&pring->txcmplq))
+			txcmplq_cnt++;
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+			txq_cnt, phba->iocb_cnt,
+			fcp_txcmplq_cnt,
+			txcmplq_cnt);
+		return false;
+	}
+
+	/* Save off the slow-path queue event for work thread to process */
+	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&irspiocbq->cq_event.list,
+		      &phba->sli4_hba.sp_queue_event);
+	phba->hba_flag |= HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine on the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	/* sanity check on queue memory */
+	if (unlikely(!phba->sli4_hba.els_wq))
+		return;
+	/* Check for the slow-path ELS work queue */
+	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+	else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2579 Slow-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+				phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+				   struct lpfc_queue *cq,
+				   struct sli4_wcqe_xri_aborted *wcqe)
+{
+	bool workposted = false;
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0602 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into the proper xri abort event list */
+	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+	switch (cq->subtype) {
+	case LPFC_FCP:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+		/* Set the fcp xri abort event flag */
+		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case LPFC_ELS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+		/* Set the els xri abort event flag */
+		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case LPFC_NVME:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+		/* Set the nvme xri abort event flag */
+		phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0603 Invalid CQ subtype %d: "
+				"%08x %08x %08x %08x\n",
+				cq->subtype, wcqe->word0, wcqe->parameter,
+				wcqe->word2, wcqe->word3);
+		lpfc_sli4_cq_event_release(phba, cq_event);
+		workposted = false;
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @rcqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
+{
+	bool workposted = false;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct hbq_dmabuf *dma_buf;
+	uint32_t status, rq_id;
+	unsigned long iflags;
+
+	/* sanity check on queue memory */
+	if (unlikely(!hrq) || unlikely(!drq))
+		return workposted;
+
+	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+	else
+		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+	if (rq_id != hrq->queue_id)
+		goto out;
+
+	status = bf_get(lpfc_rcqe_status, rcqe);
+	switch (status) {
+	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2537 Receive Frame Truncated!!\n");
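+		/* fallthrough */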
+	case FC_STATUS_RQ_SUCCESS:
+		lpfc_sli4_rq_release(hrq, drq);
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+		if (!dma_buf) {
+			hrq->RQ_no_buf_found++;
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto out;
+		}
+		hrq->RQ_rcv_buf++;
+		hrq->RQ_buf_posted--;
+		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+
+		/* If an NVME LS event (type 0x28), treat it as Fast path */
+		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
+
+		/* save off the frame for the worker thread to process */
+		list_add_tail(&dma_buf->cq_event.list,
+			      &phba->sli4_hba.sp_queue_event);
+		/* Frame received */
+		phba->hba_flag |= HBA_SP_QUEUE_EVT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		if (phba->nvmet_support) {
+			tgtp = phba->targetport->private;
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+					"6402 RQE Error x%x, posted %d err_cnt "
+					"%d: %x %x %x\n",
+					status, hrq->RQ_buf_posted,
+					hrq->RQ_no_posted_buf,
+					atomic_read(&tgtp->rcv_fcp_cmd_in),
+					atomic_read(&tgtp->rcv_fcp_cmd_out),
+					atomic_read(&tgtp->xmt_fcp_release));
+		}
+		/* fallthrough */
+
+	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+		hrq->RQ_no_posted_buf++;
+		/* Post more buffers if possible */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	}
+out:
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @wcqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue or receive-queue completion
+ * queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_cqe cqevt;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_cqe_code, &cqevt)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ/RQ complete event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
+				(struct lpfc_wcqe_complete *)&cqevt);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+				(struct lpfc_wcqe_release *)&cqevt);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&cqevt);
+		break;
+	case CQE_CODE_RECEIVE:
+	case CQE_CODE_RECEIVE_V1:
+		/* Process the RQ event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_rcqe(phba,
+				(struct lpfc_rcqe *)&cqevt);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_cqe_code, &cqevt));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the slow-path event queue.
+ * It will check the MajorCode and MinorCode to determine whether this is a
+ * completion event on a completion queue; if not, an error is logged and the
+ * routine just returns. Otherwise, it will find the corresponding completion
+ * queue, process all the entries on that completion queue, rearm the
+ * completion queue, and then return.
+ *
+ **/
+static int
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+	struct lpfc_queue *speq)
+{
+	struct lpfc_queue *cq = NULL, *childq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	int ecount = 0;
+	uint16_t cqid;
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	list_for_each_entry(childq, &speq->child_list, list) {
+		if (childq->queue_id == cqid) {
+			cq = childq;
+			break;
+		}
+	}
+	if (unlikely(!cq)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0365 Slow-path CQ identifier "
+					"(%d) does not exist\n", cqid);
+		return 0;
+	}
+
+	/* Save EQ associated with this CQ */
+	cq->assoc_qp = speq;
+
+	/* Process all the entries to the CQ */
+	switch (cq->type) {
+	case LPFC_MCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
+			if (!(++ecount % cq->entry_repost))
+				break;
+			cq->CQ_mbox++;
+		}
+		break;
+	case LPFC_WCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			if ((cq->subtype == LPFC_FCP) ||
+			    (cq->subtype == LPFC_NVME))
+				workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
+								       cqe);
+			else
+				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+								      cqe);
+			if (!(++ecount % cq->entry_repost))
+				break;
+		}
+
+		/* Track the max number of CQEs processed in 1 EQ */
+		if (ecount > cq->CQ_max_cqe)
+			cq->CQ_max_cqe = ecount;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0370 Invalid completion queue type (%d)\n",
+				cq->type);
+		return 0;
+	}
+
+	/* Catch the no cq entry condition, log an error */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0371 No entry from the CQ: identifier "
+				"(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+
+	return ecount;
+}
+
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to associated CQ
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = cq->pring;
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq irspiocbq;
+	unsigned long iflags;
+
+	/* Check for response status */
+	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+		/* If resource errors reported from HBA, reduce queue
+		 * depth of the SCSI device.
+		 */
+		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
+		     IOSTAT_LOCAL_REJECT)) &&
+		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
+		     IOERR_NO_RESOURCES))
+			phba->lpfc_rampdown_queue_depth(phba);
+
+		/* Log the error status */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0373 FCP complete error: status=x%x, "
+				"hw_status=x%x, total_data_specified=%d, "
+				"parameter=x%x, word3=x%x\n",
+				bf_get(lpfc_wcqe_c_status, wcqe),
+				bf_get(lpfc_wcqe_c_hw_status, wcqe),
+				wcqe->total_data_placed, wcqe->parameter,
+				wcqe->word3);
+	}
+
+	/* Look up the FCP command IOCB and create pseudo response IOCB */
+	spin_lock_irqsave(&pring->ring_lock, iflags);
+	pring->stats.iocb_event++;
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0374 FCP complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+
+	if (cq->assoc_qp)
+		cmdiocbq->isr_timestamp =
+			cq->assoc_qp->isr_timestamp;
+
+	if (cmdiocbq->iocb_cmpl == NULL) {
+		if (cmdiocbq->wqe_cmpl) {
+			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+				spin_lock_irqsave(&phba->hbalock, iflags);
+				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+				spin_unlock_irqrestore(&phba->hbalock, iflags);
+			}
+
+			/* Pass the cmd_iocb and the wcqe to the upper layer */
+			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
+			return;
+		}
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0375 FCP cmdiocb not callback function "
+				"iotag: (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+
+	/* Fake the irspiocb and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
+
+	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+
+	/* Pass the cmd_iocb and the rsp state to the upper layer */
+	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a fast-path WQ entry consumed event by invoking the
+ * proper WQ release routine on the matching fast-path WQ.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	struct lpfc_queue *childwq;
+	bool wqid_matched = false;
+	uint16_t hba_wqid;
+
+	/* Check for fast-path FCP work queue release */
+	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+	list_for_each_entry(childwq, &cq->child_list, list) {
+		if (childwq->queue_id == hba_wqid) {
+			lpfc_sli4_wq_release(childwq,
+					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+			wqid_matched = true;
+			break;
+		}
+	}
+	/* Report warning log message if no match found */
+	if (!wqid_matched)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2580 Fast-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
+}
+
+/**
+ * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the NVMET completion queue.
+ * @rcqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			    struct lpfc_rcqe *rcqe)
+{
+	bool workposted = false;
+	struct lpfc_queue *hrq;
+	struct lpfc_queue *drq;
+	struct rqb_dmabuf *dma_buf;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_nvmet_tgtport *tgtp;
+	uint32_t status, rq_id;
+	unsigned long iflags;
+	uint32_t fctl, idx;
+
+	if ((phba->nvmet_support == 0) ||
+	    (phba->sli4_hba.nvmet_cqset == NULL))
+		return workposted;
+
+	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
+	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
+	drq = phba->sli4_hba.nvmet_mrq_data[idx];
+
+	/* sanity check on queue memory */
+	if (unlikely(!hrq) || unlikely(!drq))
+		return workposted;
+
+	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+	else
+		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+
+	if ((phba->nvmet_support == 0) ||
+	    (rq_id != hrq->queue_id))
+		return workposted;
+
+	status = bf_get(lpfc_rcqe_status, rcqe);
+	switch (status) {
+	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"6126 Receive Frame Truncated!!\n");
+		/* Drop thru */
+	case FC_STATUS_RQ_SUCCESS:
+		lpfc_sli4_rq_release(hrq, drq);
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
+		if (!dma_buf) {
+			hrq->RQ_no_buf_found++;
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto out;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		hrq->RQ_rcv_buf++;
+		hrq->RQ_buf_posted--;
+		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
+
+		/* Just some basic sanity checks on FCP Command frame */
+		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
+			fc_hdr->fh_f_ctl[1] << 8 |
+			fc_hdr->fh_f_ctl[2]);
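+		/*
+		 * Accept only a single-frame sequence: FIRST_SEQ, END_SEQ
+		 * and SEQ_INIT must all be set and fh_seq_cnt must be zero;
+		 * anything else is dropped below.
+		 */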
+		if (((fctl &
+		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
+		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
+		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
+			goto drop;
+
+		if (fc_hdr->fh_type == FC_TYPE_FCP) {
+			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
+			lpfc_nvmet_unsol_fcp_event(
+				phba, idx, dma_buf,
+				cq->assoc_qp->isr_timestamp);
+			return false;
+		}
+drop:
+		lpfc_in_buf_free(phba, &dma_buf->dbuf);
+		break;
+	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		if (phba->nvmet_support) {
+			tgtp = phba->targetport->private;
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
+					"6401 RQE Error x%x, posted %d err_cnt "
+					"%d: %x %x %x\n",
+					status, hrq->RQ_buf_posted,
+					hrq->RQ_no_posted_buf,
+					atomic_read(&tgtp->rcv_fcp_cmd_in),
+					atomic_read(&tgtp->rcv_fcp_cmd_out),
+					atomic_read(&tgtp->xmt_fcp_release));
+		}
+		/* fallthrough */
+
+	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+		hrq->RQ_no_posted_buf++;
+		/* Post more buffers if possible */
+		break;
+	}
+out:
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to fast-path completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for FCP command response completion.
+ **/
+static int
+lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_release wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+	case CQE_CODE_NVME_ERSP:
+		cq->CQ_wq++;
+		/* Process the WQ complete event */
+		phba->last_completion_time = jiffies;
+		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
+			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		if (cq->subtype == LPFC_NVME_LS)
+			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		cq->CQ_release_wqe++;
+		/* Process the WQ release event */
+		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		cq->CQ_xri_aborted++;
+		/* Process the WQ XRI abort event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	case CQE_CODE_RECEIVE_V1:
+	case CQE_CODE_RECEIVE:
+		phba->last_completion_time = jiffies;
+		if (cq->subtype == LPFC_NVMET) {
+			workposted = lpfc_sli4_nvmet_handle_rcqe(
+				phba, cq, (struct lpfc_rcqe *)&wcqe);
+		}
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0144 Not a valid CQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the fast-path event queue.
+ * It will check the MajorCode and MinorCode to determine whether this is a
+ * completion event on a completion queue; if not, an error is logged and the
+ * routine just returns. Otherwise, it will find the corresponding completion
+ * queue, process all the entries on the completion queue, rearm the
+ * completion queue, and then return.
+ **/
+static int
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+			uint32_t qidx)
+{
+	struct lpfc_queue *cq = NULL;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid, id;
+	int ecount = 0;
+
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0366 Not a valid completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
+		return 0;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
+		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
+		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
+			/* Process NVMET unsol rcv */
+			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
+			goto process_cq;
+		}
+	}
+
+	if (phba->sli4_hba.nvme_cq_map &&
+	    (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
+		/* Process NVME / NVMET command completion */
+		cq = phba->sli4_hba.nvme_cq[qidx];
+		goto process_cq;
+	}
+
+	if (phba->sli4_hba.fcp_cq_map &&
+	    (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
+		/* Process FCP command completion */
+		cq = phba->sli4_hba.fcp_cq[qidx];
+		goto process_cq;
+	}
+
+	if (phba->sli4_hba.nvmels_cq &&
+	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
+		/* Process NVME unsol rcv */
+		cq = phba->sli4_hba.nvmels_cq;
+	}
+
+	/* Otherwise this is a Slow path event */
+	if (cq == NULL) {
+		ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
+						 phba->sli4_hba.hba_eq[qidx]);
+		return ecount;
+	}
+
+process_cq:
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0368 Miss-matched fast-path completion "
+				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return 0;
+	}
+
+	/* Save EQ associated with this CQ */
+	cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
+
+	/* Process all the entries to the CQ */
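+	/*
+	 * The loop below yields after a multiple of entry_repost CQEs so
+	 * that a busy queue cannot hold the handler indefinitely.
+	 */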
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
+		if (!(++ecount % cq->entry_repost))
+			break;
+	}
+
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+	cq->assoc_qp->EQ_cqe_cnt += ecount;
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0369 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+
+	return ecount;
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	struct lpfc_eqe *eqe;
+
+	/* walk all the EQ entries and drop on the floor */
+	while ((eqe = lpfc_sli4_eq_get(eq)))
+		;
+
+	/* Clear and re-arm the EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+
+/**
+ * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
+ *			     entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the Flash Optimized Fabric
+ * event queue.  It will check the MajorCode and MinorCode to determine
+ * whether this is a completion event on a completion queue; if not, an error
+ * is logged and the routine just returns. Otherwise, it will find the
+ * corresponding completion queue, process all the entries on the completion
+ * queue, rearm the completion queue, and then return.
+ **/
+static void
+lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid;
+	int ecount = 0;
+
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9147 Not a valid completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	/* Next check for OAS */
+	cq = phba->sli4_hba.oas_cq;
+	if (unlikely(!cq)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"9148 OAS completion queue "
+					"does not exist\n");
+		return;
+	}
+
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9149 Miss-matched fast-path compl "
+				"queue id: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return;
+	}
+
+	/* Save EQ associated with this CQ */
+	cq->assoc_qp = phba->sli4_hba.fof_eq;
+
+	/* Process all the entries to the OAS CQ */
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
+		if (!(++ecount % cq->entry_repost))
+			break;
+	}
+
+	/* Track the max number of CQEs processed in 1 EQ */
+	if (ecount > cq->CQ_max_cqe)
+		cq->CQ_max_cqe = ecount;
+	cq->assoc_qp->EQ_cqe_cnt += ecount;
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"9153 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there are works to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
+ * IOCB ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. Flash Optimized Fabric ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the EQ and CQ are mapped one-to-one, such that the EQ index is
+ * equal to the CQ index.
+ *
+ * This function returns IRQ_HANDLED when the interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_hba_eq_hdl *hba_eq_hdl;
+	struct lpfc_queue *eq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+
+	/* Get the driver's phba structure from the dev_id */
+	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+	phba = hba_eq_hdl->phba;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	eq = phba->sli4_hba.fof_eq;
+	if (unlikely(!eq))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, eq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the FOF EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(eq))) {
+		lpfc_sli4_fof_handle_eqe(phba, eqe);
+		if (!(++ecount % eq->entry_repost))
+			break;
+		eq->EQ_processed++;
+	}
+
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > eq->EQ_max_eqe)
+		eq->EQ_max_eqe = ecount;
+
+
+	if (unlikely(ecount == 0)) {
+		eq->EQ_no_entry++;
+
+		if (phba->intr_type == MSIX) {
+			/* MSI-X treated interrupt served as no EQ share INT */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"9145 MSI-X interrupt with no EQE\n");
+		} else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"9146 ISR interrupt with no EQE\n");
+			/* Non MSI-X treated on interrupt as EQ share INT */
+			return IRQ_NONE;
+		}
+	}
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+	return IRQ_HANDLED;
+}
+
+/**
+ * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
+ * is equal to the FCP CQ index.
+ *
+ * The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when the interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_hba_eq_hdl *hba_eq_hdl;
+	struct lpfc_queue *fpeq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+	int ccount = 0;
+	int hba_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+	phba = hba_eq_hdl->phba;
+	hba_eqidx = hba_eq_hdl->idx;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+	if (unlikely(!phba->sli4_hba.hba_eq))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
+	if (unlikely(!fpeq))
+		return IRQ_NONE;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (phba->ktime_on)
+		fpeq->isr_timestamp = ktime_get_ns();
+#endif
+
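+	/*
+	 * With lpfc_fcp_look_ahead set, hba_eq_in_use gates this EQ: only
+	 * the path that decrements the counter to zero services the EQ
+	 * (with its interrupt cleared); any other entry backs off.
+	 */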
+	if (lpfc_fcp_look_ahead) {
+		if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
+			lpfc_sli4_eq_clr_intr(fpeq);
+		else {
+			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
+			return IRQ_NONE;
+		}
+	}
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, fpeq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		if (lpfc_fcp_look_ahead)
+			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the events on the FCP fast-path EQ
+	 */
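+	/*
+	 * The loop yields after a multiple of entry_repost EQEs, or once
+	 * more than LPFC_MAX_ISR_CQE CQEs have been handled, to bound the
+	 * time spent in a single interrupt.
+	 */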
+	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+		ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+		if (!(++ecount % fpeq->entry_repost) ||
+		    ccount > LPFC_MAX_ISR_CQE)
+			break;
+		fpeq->EQ_processed++;
+	}
+
+	/* Track the max number of EQEs processed in 1 intr */
+	if (ecount > fpeq->EQ_max_eqe)
+		fpeq->EQ_max_eqe = ecount;
+
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+	if (unlikely(ecount == 0)) {
+		fpeq->EQ_no_entry++;
+
+		if (lpfc_fcp_look_ahead) {
+			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
+			return IRQ_NONE;
+		}
+
+		if (phba->intr_type == MSIX)
+			/* MSI-X treated interrupt served as no EQ share INT */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0358 MSI-X interrupt with no EQE\n");
+		else
+			/* Non MSI-X treated on interrupt as EQ share INT */
+			return IRQ_NONE;
+	}
+
+	if (lpfc_fcp_look_ahead)
+		atomic_inc(&hba_eq_hdl->hba_eq_in_use);
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_hba_intr_handler */
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	irqreturn_t hba_irq_rc;
+	bool hba_handled = false;
+	int qidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Invoke fast-path host attention interrupt handling as appropriate.
+	 */
+	for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
+		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
+					&phba->sli4_hba.hba_eq_hdl[qidx]);
+		if (hba_irq_rc == IRQ_HANDLED)
+			hba_handled |= true;
+	}
+
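+	/*
+	 * qidx now equals phba->io_channel_irqs; that trailing hba_eq_hdl
+	 * slot is the one carrying the FOF event queue handle.
+	 */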
+	if (phba->cfg_fof) {
+		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
+					&phba->sli4_hba.hba_eq_hdl[qidx]);
+		if (hba_irq_rc == IRQ_HANDLED)
+			hba_handled |= true;
+	}
+
+	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
+} /* lpfc_sli4_intr_handler */
+
+/**
+ * lpfc_sli4_queue_free - free a queue structure and associated memory
+ * @queue: The queue structure to free.
+ *
+ * This function frees a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called after destroying the
+ * queue on the HBA.
+ **/
+void
+lpfc_sli4_queue_free(struct lpfc_queue *queue)
+{
+	struct lpfc_dmabuf *dmabuf;
+
+	if (!queue)
+		return;
+
+	while (!list_empty(&queue->page_list)) {
+		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
+				 list);
+		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	if (queue->rqbp) {
+		lpfc_free_rq_buffer(queue->phba, queue);
+		kfree(queue->rqbp);
+	}
+
+	if (!list_empty(&queue->wq_list))
+		list_del(&queue->wq_list);
+
+	kfree(queue);
+	return;
+}
+
+/**
+ * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
+ * @phba: The HBA that this queue is being created on.
+ * @entry_size: The size of each queue entry for this queue.
+ * @entry_count: The number of entries that this queue will handle.
+ *
+ * This function allocates a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called before creating the
+ * queue on the HBA.
+ **/
+struct lpfc_queue *
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+		      uint32_t entry_count)
+{
+	struct lpfc_queue *queue;
+	struct lpfc_dmabuf *dmabuf;
+	int x, total_qe_count;
+	void *dma_pointer;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	queue = kzalloc(sizeof(struct lpfc_queue) +
+			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
+	if (!queue)
+		return NULL;
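+	/* Round up to the number of hardware pages needed for all entries */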
+	queue->page_count = (ALIGN(entry_size * entry_count,
+			hw_page_size))/hw_page_size;
+
+	/* If needed, adjust page count to match the max the adapter supports */
+	if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
+	    (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
+		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+
+	INIT_LIST_HEAD(&queue->list);
+	INIT_LIST_HEAD(&queue->wq_list);
+	INIT_LIST_HEAD(&queue->page_list);
+	INIT_LIST_HEAD(&queue->child_list);
+	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
+		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!dmabuf)
+			goto out_fail;
+		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+						   hw_page_size, &dmabuf->phys,
+						   GFP_KERNEL);
+		if (!dmabuf->virt) {
+			kfree(dmabuf);
+			goto out_fail;
+		}
+		dmabuf->buffer_tag = x;
+		list_add_tail(&dmabuf->list, &queue->page_list);
+		/* initialize queue's entry array */
+		dma_pointer = dmabuf->virt;
+		for (; total_qe_count < entry_count &&
+		     dma_pointer < (hw_page_size + dmabuf->virt);
+		     total_qe_count++, dma_pointer += entry_size) {
+			queue->qe[total_qe_count].address = dma_pointer;
+		}
+	}
+	queue->entry_size = entry_size;
+	queue->entry_count = entry_count;
+	queue->phba = phba;
+
+	/* entry_repost will be set during q creation */
+
+	return queue;
+out_fail:
+	lpfc_sli4_queue_free(queue);
+	return NULL;
+}
+
+/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function returns the host memory address to which the specified PCI
+ * BAR set has already been iomapped. The returned host memory address can
+ * be NULL.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+	if (!phba->pcidev)
+		return NULL;
+
+	switch (pci_barset) {
+	case WQ_PCI_BAR_0_AND_1:
+		return phba->pci_bar0_memmap_p;
+	case WQ_PCI_BAR_2_AND_3:
+		return phba->pci_bar2_memmap_p;
+	case WQ_PCI_BAR_4_AND_5:
+		return phba->pci_bar4_memmap_p;
+	default:
+		break;
+	}
+	return NULL;
+}
+
+/**
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @startq: The starting FCP EQ to modify
+ *
+ * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
+ * updated in one mailbox command.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @startq
+ * is used to get the starting FCP EQ to change.
+ * This function is synchronous; it will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+			 uint32_t numq, uint32_t imax)
+{
+	struct lpfc_mbx_modify_eq_delay *eq_delay;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_queue *eq;
+	int cnt, rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t result, val;
+	int qidx;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint16_t dmult;
+
+	if (startq >= phba->io_channel_irqs)
+		return 0;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	eq_delay = &mbox->u.mqe.un.eq_delay;
+
+	/* Calculate delay multiplier from maximum interrupts per second */
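+	/*
+	 * result is the target interrupt rate per EQ.  A zero rate, or one
+	 * above LPFC_DMULT_CONST, disables coalescing (dmult = 0); any other
+	 * rate maps onto the hardware delay-multiplier encoding, clamped to
+	 * LPFC_DMULT_MAX.
+	 */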
+	result = imax / phba->io_channel_irqs;
+	if (result > LPFC_DMULT_CONST || result == 0)
+		dmult = 0;
+	else
+		dmult = LPFC_DMULT_CONST/result - 1;
+	if (dmult > LPFC_DMULT_MAX)
+		dmult = LPFC_DMULT_MAX;
+
+	cnt = 0;
+	for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
+		eq = phba->sli4_hba.hba_eq[qidx];
+		if (!eq)
+			continue;
+		eq->q_mode = imax;
+		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
+		eq_delay->u.request.eq[cnt].phase = 0;
+		eq_delay->u.request.eq[cnt].delay_multi = dmult;
+		cnt++;
+
+		/* q_mode is only used for auto_imax */
+		if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+			/* Use EQ Delay Register method for q_mode */
+
+			/* Convert for EQ Delay register */
+			val =  phba->cfg_fcp_imax;
+			if (val) {
+				/* First, interrupts per sec per EQ */
+				val = phba->cfg_fcp_imax /
+					phba->io_channel_irqs;
+
+				/* us delay between each interrupt */
+				val = LPFC_SEC_TO_USEC / val;
+			}
+			eq->q_mode = val;
+		} else {
+			eq->q_mode = imax;
+		}
+
+		if (cnt >= numq)
+			break;
+	}
+	eq_delay->u.request.num_eq = cnt;
+
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->context1 = NULL;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2512 MODIFY_EQ_DELAY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_eq_create - Create an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @eq: The queue structure to use to create the event queue.
+ * @imax: The maximum interrupt per second limit.
+ *
+ * This function creates an event queue, as detailed in @eq, on a port,
+ * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @eq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the EQ_CREATE mailbox command to the HBA to setup the
+ * event queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
+{
+	struct lpfc_mbx_eq_create *eq_create;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint16_t dmult;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!eq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_eq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_EQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	eq_create = &mbox->u.mqe.un.eq_create;
+	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
+	       eq->page_count);
+	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
+	       LPFC_EQE_SIZE);
+	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
+	/* don't setup delay multiplier using EQ_CREATE */
+	dmult = 0;
+	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
+	       dmult);
+	switch (eq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0360 Unsupported EQ count. (%d)\n",
+				eq->entry_count);
+		if (eq->entry_count < 256) {
+			status = -EINVAL;
+			goto out;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 256:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_256);
+		break;
+	case 512:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_512);
+		break;
+	case 1024:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_4096);
+		break;
+	}
+	list_for_each_entry(dmabuf, &eq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->context1 = NULL;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2500 EQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	eq->type = LPFC_EQ;
+	eq->subtype = LPFC_NONE;
+	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
+	if (eq->queue_id == 0xFFFF)
+		status = -ENXIO;
+	eq->host_index = 0;
+	eq->hba_index = 0;
+	eq->entry_repost = LPFC_EQ_REPOST;
+
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
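+
+/*
+ * Illustrative caller sketch (an assumption for documentation, not part
+ * of this file's real call chain): queue memory is allocated first and
+ * the queue is then created on the port, e.g.
+ *
+ *	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE_4B, 1024);
+ *	if (!eq || lpfc_eq_create(phba, eq, 150000))
+ *		...unwind and fail the adapter setup...
+ *
+ * where the 1024 entry count and the 150000 interrupts/sec cap are
+ * example values only; the real callers live in lpfc_init.c.
+ */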
+
+/**
+ * lpfc_cq_create - Create a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cq: The queue structure to use to create the completion queue.
+ * @eq: The event queue to bind this completion queue to.
+ * @type: The type of the completion queue.
+ * @subtype: The subtype of the queue, indicating its functionality.
+ *
+ * This function creates a completion queue, as detailed in @cq, on a port,
+ * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @eq
+ * is used to indicate which event queue to bind this completion queue to. This
+ * function will send the CQ_CREATE mailbox command to the HBA to set up the
+ * completion queue. This function is synchronous and will wait for the
+ * mailbox command to finish before returning.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
+	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
+{
+	struct lpfc_mbx_cq_create *cq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!cq || !eq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_cq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_CQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	cq_create = &mbox->u.mqe.un.cq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
+		    cq->page_count);
+	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
+	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.cqv);
+	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
+		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+		       eq->queue_id);
+	} else {
+		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+		       eq->queue_id);
+	}
+	switch (cq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0361 Unsupported CQ count: "
+				"entry cnt %d sz %d pg cnt %d\n",
+				cq->entry_count, cq->entry_size,
+				cq->page_count);
+		if (cq->entry_count < 256) {
+			status = -EINVAL;
+			goto out;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 256:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_256);
+		break;
+	case 512:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_512);
+		break;
+	case 1024:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_1024);
+		break;
+	}
+	list_for_each_entry(dmabuf, &cq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2501 CQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	if (cq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	/* link the cq onto the parent eq child list */
+	list_add_tail(&cq->list, &eq->child_list);
+	/* Set up completion queue's type and subtype */
+	cq->type = type;
+	cq->subtype = subtype;
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	cq->assoc_qid = eq->queue_id;
+	cq->host_index = 0;
+	cq->hba_index = 0;
+	cq->entry_repost = LPFC_CQ_REPOST;
+
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
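+
+/*
+ * Note on the linkage above: each completion queue created here is added
+ * to its parent event queue's child_list, so teardown code can, for
+ * example, walk eq->child_list and destroy every child CQ before
+ * destroying the EQ itself.
+ */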
+
+/**
+ * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cqp: The queue structure array to use to create the completion queues.
+ * @eqp: The event queue array to bind these completion queues to.
+ * @type: The type of the completion queues.
+ * @subtype: The subtype of the queues, indicating their functionality.
+ *
+ * This function creates a set of completion queues to support MRQ, as
+ * detailed in @cqp, on a port described by @phba, by sending a
+ * CREATE_CQ_SET mailbox command to the HBA.
+ *
+ * The @phba struct is used to send the mailbox command to the HBA. The @cqp
+ * array is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for each queue. The
+ * @eqp array is used to indicate which event queue each completion queue
+ * binds to. This function will send the CREATE_CQ_SET mailbox command to
+ * the HBA to set up the completion queues. This function is synchronous and
+ * will wait for the mailbox command to finish before returning.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
+		   struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_queue *eq;
+	struct lpfc_mbx_cq_create_set *cq_set;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, alloclen, status = 0;
+	int cnt, idx, numcq, page_idx = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	numcq = phba->cfg_nvmet_mrq;
+	if (!cqp || !eqp || !numcq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	length = sizeof(struct lpfc_mbx_cq_create_set);
+	length += ((numcq * cqp[0]->page_count) *
+		   sizeof(struct dma_address));
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
+			LPFC_SLI4_MBX_NEMBED);
+	if (alloclen < length) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3098 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory size "
+				"(%d)\n", alloclen, length);
+		status = -ENOMEM;
+		goto out;
+	}
+	cq_set = mbox->sge_array->addr[0];
+	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
+
+	for (idx = 0; idx < numcq; idx++) {
+		cq = cqp[idx];
+		eq = eqp[idx];
+		if (!cq || !eq) {
+			status = -ENOMEM;
+			goto out;
+		}
+
+		switch (idx) {
+		case 0:
+			bf_set(lpfc_mbx_cq_create_set_page_size,
+			       &cq_set->u.request,
+			       (hw_page_size / SLI4_PAGE_SIZE));
+			bf_set(lpfc_mbx_cq_create_set_num_pages,
+			       &cq_set->u.request, cq->page_count);
+			bf_set(lpfc_mbx_cq_create_set_evt,
+			       &cq_set->u.request, 1);
+			bf_set(lpfc_mbx_cq_create_set_valid,
+			       &cq_set->u.request, 1);
+			bf_set(lpfc_mbx_cq_create_set_cqe_size,
+			       &cq_set->u.request, 0);
+			bf_set(lpfc_mbx_cq_create_set_num_cq,
+			       &cq_set->u.request, numcq);
+			switch (cq->entry_count) {
+			default:
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"3118 Bad CQ count. (%d)\n",
+						cq->entry_count);
+				if (cq->entry_count < 256) {
+					status = -EINVAL;
+					goto out;
+				}
+				/* otherwise default to smallest (drop thru) */
+			case 256:
+				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+				       &cq_set->u.request, LPFC_CQ_CNT_256);
+				break;
+			case 512:
+				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+				       &cq_set->u.request, LPFC_CQ_CNT_512);
+				break;
+			case 1024:
+				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+				       &cq_set->u.request, LPFC_CQ_CNT_1024);
+				break;
+			}
+			bf_set(lpfc_mbx_cq_create_set_eq_id0,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 1:
+			bf_set(lpfc_mbx_cq_create_set_eq_id1,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 2:
+			bf_set(lpfc_mbx_cq_create_set_eq_id2,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 3:
+			bf_set(lpfc_mbx_cq_create_set_eq_id3,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 4:
+			bf_set(lpfc_mbx_cq_create_set_eq_id4,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 5:
+			bf_set(lpfc_mbx_cq_create_set_eq_id5,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 6:
+			bf_set(lpfc_mbx_cq_create_set_eq_id6,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 7:
+			bf_set(lpfc_mbx_cq_create_set_eq_id7,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 8:
+			bf_set(lpfc_mbx_cq_create_set_eq_id8,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 9:
+			bf_set(lpfc_mbx_cq_create_set_eq_id9,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 10:
+			bf_set(lpfc_mbx_cq_create_set_eq_id10,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 11:
+			bf_set(lpfc_mbx_cq_create_set_eq_id11,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 12:
+			bf_set(lpfc_mbx_cq_create_set_eq_id12,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 13:
+			bf_set(lpfc_mbx_cq_create_set_eq_id13,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 14:
+			bf_set(lpfc_mbx_cq_create_set_eq_id14,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		case 15:
+			bf_set(lpfc_mbx_cq_create_set_eq_id15,
+			       &cq_set->u.request, eq->queue_id);
+			break;
+		}
+
+		/* link the cq onto the parent eq child list */
+		list_add_tail(&cq->list, &eq->child_list);
+		/* Set up completion queue's type and subtype */
+		cq->type = type;
+		cq->subtype = subtype;
+		cq->assoc_qid = eq->queue_id;
+		cq->host_index = 0;
+		cq->hba_index = 0;
+		cq->entry_repost = LPFC_CQ_REPOST;
+
+		rc = 0;
+		list_for_each_entry(dmabuf, &cq->page_list, list) {
+			memset(dmabuf->virt, 0, hw_page_size);
+			cnt = page_idx + dmabuf->buffer_tag;
+			cq_set->u.request.page[cnt].addr_lo =
+					putPaddrLow(dmabuf->phys);
+			cq_set->u.request.page[cnt].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+			rc++;
+		}
+		page_idx += rc;
+	}
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3119 CQ_CREATE_SET mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
+	if (rc == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+
+	for (idx = 0; idx < numcq; idx++) {
+		cq = cqp[idx];
+		cq->queue_id = rc + idx;
+	}
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	return status;
+}
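+
+/*
+ * Worked example of the response handling above (illustrative values):
+ * CREATE_CQ_SET returns only the base queue id, and the ids of the
+ * remaining queues in the set are consecutive, so with numcq = 4 and a
+ * base id of 100 the final loop assigns queue ids 100, 101, 102 and 103.
+ */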
+
+/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mailbox queue.
+ *
+ * This function provides fallback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. Its purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_dmabuf *dmabuf;
+	int length;
+
+	length = (sizeof(struct lpfc_mbx_mq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	mq_create = &mbox->u.mqe.un.mq_create;
+	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+	       mq->page_count);
+	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	switch (mq->entry_count) {
+	case 16:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+			putPaddrLow(dmabuf->phys);
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+			putPaddrHigh(dmabuf->phys);
+	}
+}
+
+/**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mailbox queue.
+ * @subtype: The queue's subtype.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send the mailbox command to the HBA. The @mq
+ * struct is used to get the entry count and entry size that are necessary
+ * to determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to set up the
+ * mailbox queue. This function is synchronous and will wait for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_mbx_mq_create_ext *mq_create_ext;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!mq || !cq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+	bf_set(lpfc_mbx_mq_create_ext_num_pages,
+	       &mq_create_ext->u.request, mq->page_count);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.mqv);
+	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+		       cq->queue_id);
+	else
+		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+		       cq->queue_id);
+	switch (mq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0362 Unsupported MQ count. (%d)\n",
+				mq->entry_count);
+		if (mq->entry_count < 16) {
+			status = -EINVAL;
+			goto out;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 16:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+			      &mq_create_ext->u.response);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2795 MQ_CREATE_EXT failed with "
+				"status x%x. Failback to MQ_CREATE.\n",
+				rc);
+		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+		mq_create = &mbox->u.mqe.un.mq_create;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+				      &mq_create->u.response);
+	}
+
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2502 MQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	if (mq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	mq->type = LPFC_MQ;
+	mq->assoc_qid = cq->queue_id;
+	mq->subtype = subtype;
+	mq->host_index = 0;
+	mq->hba_index = 0;
+	mq->entry_repost = LPFC_MQ_REPOST;
+
+	/* link the mq onto the parent cq child list */
+	list_add_tail(&mq->list, &cq->child_list);
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
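+
+/*
+ * Fallback note: when MQ_CREATE_EXT is rejected (typically by older
+ * firmware) the same mailbox is reinitialized by lpfc_mq_create_fb_init()
+ * and reissued as a plain MQ_CREATE, trading away the asynchronous event
+ * registrations (link, FIP, group5, FC, SLI) requested above.
+ */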
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to set up the
+ * work queue. This function is synchronous and will wait for the mailbox
+ * command to finish before returning.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_wq_create *wq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+	struct dma_address *page;
+	void __iomem *bar_memmap_p;
+	uint32_t db_offset;
+	uint16_t pci_barset;
+
+	/* sanity check on queue memory */
+	if (!wq || !cq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	wq_create = &mbox->u.mqe.un.wq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+		    wq->page_count);
+	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+		    cq->queue_id);
+
+	/* wqv is the earliest version supported, NOT the latest */
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.wqv);
+
+	switch (phba->sli4_hba.pc_sli4_params.wqv) {
+	case LPFC_Q_CREATE_VERSION_0:
+		switch (wq->entry_size) {
+		default:
+		case 64:
+			/* Nothing to do, version 0 ONLY supports 64 byte */
+			page = wq_create->u.request.page;
+			break;
+		case 128:
+			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+			    LPFC_WQ_SZ128_SUPPORT)) {
+				status = -ERANGE;
+				goto out;
+			}
+			/* If we get here the HBA MUST also support V1 and
+			 * we MUST use it
+			 */
+			bf_set(lpfc_mbox_hdr_version, &shdr->request,
+			       LPFC_Q_CREATE_VERSION_1);
+
+			bf_set(lpfc_mbx_wq_create_wqe_count,
+			       &wq_create->u.request_1, wq->entry_count);
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_128);
+			bf_set(lpfc_mbx_wq_create_page_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_PAGE_SIZE_4096);
+			page = wq_create->u.request_1.page;
+			break;
+		}
+		break;
+	case LPFC_Q_CREATE_VERSION_1:
+		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+		       wq->entry_count);
+		bf_set(lpfc_mbox_hdr_version, &shdr->request,
+		       LPFC_Q_CREATE_VERSION_1);
+
+		switch (wq->entry_size) {
+		default:
+		case 64:
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_64);
+			break;
+		case 128:
+			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+				LPFC_WQ_SZ128_SUPPORT)) {
+				status = -ERANGE;
+				goto out;
+			}
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_128);
+			break;
+		}
+		bf_set(lpfc_mbx_wq_create_page_size,
+		       &wq_create->u.request_1,
+		       LPFC_WQ_PAGE_SIZE_4096);
+		page = wq_create->u.request_1.page;
+		break;
+	default:
+		status = -ERANGE;
+		goto out;
+	}
+
+	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
+	}
+
+	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2503 WQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+	if (wq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+				       &wq_create->u.response);
+		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3265 WQ[%d] doorbell format not "
+					"supported: x%x\n", wq->queue_id,
+					wq->db_format);
+			status = -EINVAL;
+			goto out;
+		}
+		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+				    &wq_create->u.response);
+		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+		if (!bar_memmap_p) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3263 WQ[%d] failed to memmap pci "
+					"barset:x%x\n", wq->queue_id,
+					pci_barset);
+			status = -ENOMEM;
+			goto out;
+		}
+		db_offset = wq_create->u.response.doorbell_offset;
+		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3252 WQ[%d] doorbell offset not "
+					"supported: x%x\n", wq->queue_id,
+					db_offset);
+			status = -EINVAL;
+			goto out;
+		}
+		wq->db_regaddr = bar_memmap_p + db_offset;
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3264 WQ[%d]: barset:x%x, offset:x%x, "
+				"format:x%x\n", wq->queue_id, pci_barset,
+				db_offset, wq->db_format);
+	} else {
+		wq->db_format = LPFC_DB_LIST_FORMAT;
+		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+	}
+	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+	if (wq->pring == NULL) {
+		status = -ENOMEM;
+		goto out;
+	}
+	wq->type = LPFC_WQ;
+	wq->assoc_qid = cq->queue_id;
+	wq->subtype = subtype;
+	wq->host_index = 0;
+	wq->hba_index = 0;
+	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
+
+	/* link the wq onto the parent cq child list */
+	list_add_tail(&wq->list, &cq->child_list);
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
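+
+/*
+ * Version-selection note for the switch above: 128-byte WQEs require the
+ * V1 create format, so even on a port reporting version 0 the request is
+ * upgraded to LPFC_Q_CREATE_VERSION_1 when a 128-byte entry size is
+ * requested and LPFC_WQ_SZ128_SUPPORT is advertised; otherwise the
+ * request is rejected with -ERANGE.
+ */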
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind these receive queues to.
+ * @subtype: The subtype of the queues, indicating their functionality.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send the mailbox command to the HBA. The @drq
+ * and @hrq structs are used to get the entry count that is necessary to
+ * determine the number of pages to use for each queue. The @cq is used to
+ * indicate which completion queue the buffers posted to these queues bind
+ * to. This function will send the RQ_CREATE mailbox command to the HBA to
+ * set up the receive queue pair. This function is synchronous and will wait
+ * for the mailbox command to finish before returning.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_rq_create *rq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+	void __iomem *bar_memmap_p;
+	uint32_t db_offset;
+	uint16_t pci_barset;
+
+	/* sanity check on queue memory */
+	if (!hrq || !drq || !cq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	if (hrq->entry_count != drq->entry_count)
+		return -EINVAL;
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rq_create = &mbox->u.mqe.un.rq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.rqv);
+	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_rq_context_rqe_count_1,
+		       &rq_create->u.request.context,
+		       hrq->entry_count);
+		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+		bf_set(lpfc_rq_context_rqe_size,
+		       &rq_create->u.request.context,
+		       LPFC_RQE_SIZE_8);
+		bf_set(lpfc_rq_context_page_size,
+		       &rq_create->u.request.context,
+		       LPFC_RQ_PAGE_SIZE_4096);
+	} else {
+		switch (hrq->entry_count) {
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2535 Unsupported RQ count. (%d)\n",
+					hrq->entry_count);
+			if (hrq->entry_count < 512) {
+				status = -EINVAL;
+				goto out;
+			}
+			/* otherwise default to smallest count (drop through) */
+		case 512:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_512);
+			break;
+		case 1024:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_1024);
+			break;
+		case 2048:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_2048);
+			break;
+		case 4096:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_4096);
+			break;
+		}
+		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+		       LPFC_HDR_BUF_SIZE);
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       hrq->page_count);
+	list_for_each_entry(dmabuf, &hrq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2504 RQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (hrq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+
+	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+					&rq_create->u.response);
+		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3262 RQ [%d] doorbell format not "
+					"supported: x%x\n", hrq->queue_id,
+					hrq->db_format);
+			status = -EINVAL;
+			goto out;
+		}
+
+		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+				    &rq_create->u.response);
+		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+		if (!bar_memmap_p) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3269 RQ[%d] failed to memmap pci "
+					"barset:x%x\n", hrq->queue_id,
+					pci_barset);
+			status = -ENOMEM;
+			goto out;
+		}
+
+		db_offset = rq_create->u.response.doorbell_offset;
+		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3270 RQ[%d] doorbell offset not "
+					"supported: x%x\n", hrq->queue_id,
+					db_offset);
+			status = -EINVAL;
+			goto out;
+		}
+		hrq->db_regaddr = bar_memmap_p + db_offset;
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+				"format:x%x\n", hrq->queue_id, pci_barset,
+				db_offset, hrq->db_format);
+	} else {
+		hrq->db_format = LPFC_DB_RING_FORMAT;
+		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+	}
+	hrq->type = LPFC_HRQ;
+	hrq->assoc_qid = cq->queue_id;
+	hrq->subtype = subtype;
+	hrq->host_index = 0;
+	hrq->hba_index = 0;
+	hrq->entry_repost = LPFC_RQ_REPOST;
+
+	/* now create the data queue */
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.rqv);
+	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_rq_context_rqe_count_1,
+		       &rq_create->u.request.context, hrq->entry_count);
+		if (subtype == LPFC_NVMET)
+			rq_create->u.request.context.buffer_size =
+				LPFC_NVMET_DATA_BUF_SIZE;
+		else
+			rq_create->u.request.context.buffer_size =
+				LPFC_DATA_BUF_SIZE;
+		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
+		       LPFC_RQE_SIZE_8);
+		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
+		       (PAGE_SIZE/SLI4_PAGE_SIZE));
+	} else {
+		switch (drq->entry_count) {
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2536 Unsupported RQ count. (%d)\n",
+					drq->entry_count);
+			if (drq->entry_count < 512) {
+				status = -EINVAL;
+				goto out;
+			}
+			/* otherwise default to smallest count (drop through) */
+		case 512:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_512);
+			break;
+		case 1024:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_1024);
+			break;
+		case 2048:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_2048);
+			break;
+		case 4096:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_4096);
+			break;
+		}
+		if (subtype == LPFC_NVMET)
+			bf_set(lpfc_rq_context_buf_size,
+			       &rq_create->u.request.context,
+			       LPFC_NVMET_DATA_BUF_SIZE);
+		else
+			bf_set(lpfc_rq_context_buf_size,
+			       &rq_create->u.request.context,
+			       LPFC_DATA_BUF_SIZE);
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       drq->page_count);
+	list_for_each_entry(dmabuf, &drq->page_list, list) {
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		status = -ENXIO;
+		goto out;
+	}
+	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (drq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	drq->type = LPFC_DRQ;
+	drq->assoc_qid = cq->queue_id;
+	drq->subtype = subtype;
+	drq->host_index = 0;
+	drq->hba_index = 0;
+	drq->entry_repost = LPFC_RQ_REPOST;
+
+	/* link the header and data RQs onto the parent cq child list */
+	list_add_tail(&hrq->list, &cq->child_list);
+	list_add_tail(&drq->list, &cq->child_list);
+
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
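+
+/*
+ * Note: a single mailbox is reused for both RQ_CREATE commands above.
+ * The header RQ must therefore be fully created before the request is
+ * rewritten for the data RQ; on success both queues end up on
+ * cq->child_list.
+ */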
+
+/**
+ * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrqp: The queue structure array to use to create the header receive queues.
+ * @drqp: The queue structure array to use to create the data receive queues.
+ * @cqp: The completion queue array to bind these receive queues to.
+ * @subtype: The subtype of the queues, indicating their functionality.
+ *
+ * This function creates a set of receive buffer queue pairs to support MRQ,
+ * as detailed in @hrqp and @drqp, on a port described by @phba, by sending
+ * an RQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send the mailbox command to the HBA. The
+ * @drqp and @hrqp arrays are used to get the entry counts that are
+ * necessary to determine the number of pages to use for each queue. The
+ * @cqp array is used to indicate which completion queue each queue pair
+ * binds to. This function will send the RQ_CREATE mailbox command to the
+ * HBA to set up the receive queues. This function is synchronous and will
+ * wait for the mailbox command to finish before returning.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
+		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
+		uint32_t subtype)
+{
+	struct lpfc_queue *hrq, *drq, *cq;
+	struct lpfc_mbx_rq_create_v2 *rq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, alloclen, status = 0;
+	int cnt, idx, numrq, page_idx = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	numrq = phba->cfg_nvmet_mrq;
+	/* sanity check on array memory */
+	if (!hrqp || !drqp || !cqp || !numrq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	length = sizeof(struct lpfc_mbx_rq_create_v2);
+	length += ((2 * numrq * hrqp[0]->page_count) *
+		   sizeof(struct dma_address));
+
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
+				    LPFC_SLI4_MBX_NEMBED);
+	if (alloclen < length) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3099 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory size "
+				"(%d)\n", alloclen, length);
+		status = -ENOMEM;
+		goto out;
+	}
+
+	rq_create = mbox->sge_array->addr[0];
+	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
+
+	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
+	cnt = 0;
+
+	for (idx = 0; idx < numrq; idx++) {
+		hrq = hrqp[idx];
+		drq = drqp[idx];
+		cq  = cqp[idx];
+
+		/* sanity check on queue memory */
+		if (!hrq || !drq || !cq) {
+			status = -ENODEV;
+			goto out;
+		}
+
+		if (hrq->entry_count != drq->entry_count) {
+			status = -EINVAL;
+			goto out;
+		}
+
+		if (idx == 0) {
+			bf_set(lpfc_mbx_rq_create_num_pages,
+			       &rq_create->u.request,
+			       hrq->page_count);
+			bf_set(lpfc_mbx_rq_create_rq_cnt,
+			       &rq_create->u.request, (numrq * 2));
+			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
+			       1);
+			bf_set(lpfc_rq_context_base_cq,
+			       &rq_create->u.request.context,
+			       cq->queue_id);
+			bf_set(lpfc_rq_context_data_size,
+			       &rq_create->u.request.context,
+			       LPFC_NVMET_DATA_BUF_SIZE);
+			bf_set(lpfc_rq_context_hdr_size,
+			       &rq_create->u.request.context,
+			       LPFC_HDR_BUF_SIZE);
+			bf_set(lpfc_rq_context_rqe_count_1,
+			       &rq_create->u.request.context,
+			       hrq->entry_count);
+			bf_set(lpfc_rq_context_rqe_size,
+			       &rq_create->u.request.context,
+			       LPFC_RQE_SIZE_8);
+			bf_set(lpfc_rq_context_page_size,
+			       &rq_create->u.request.context,
+			       (PAGE_SIZE/SLI4_PAGE_SIZE));
+		}
+		rc = 0;
+		list_for_each_entry(dmabuf, &hrq->page_list, list) {
+			memset(dmabuf->virt, 0, hw_page_size);
+			cnt = page_idx + dmabuf->buffer_tag;
+			rq_create->u.request.page[cnt].addr_lo =
+					putPaddrLow(dmabuf->phys);
+			rq_create->u.request.page[cnt].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+			rc++;
+		}
+		page_idx += rc;
+
+		rc = 0;
+		list_for_each_entry(dmabuf, &drq->page_list, list) {
+			memset(dmabuf->virt, 0, hw_page_size);
+			cnt = page_idx + dmabuf->buffer_tag;
+			rq_create->u.request.page[cnt].addr_lo =
+					putPaddrLow(dmabuf->phys);
+			rq_create->u.request.page[cnt].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+			rc++;
+		}
+		page_idx += rc;
+
+		hrq->db_format = LPFC_DB_RING_FORMAT;
+		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+		hrq->type = LPFC_HRQ;
+		hrq->assoc_qid = cq->queue_id;
+		hrq->subtype = subtype;
+		hrq->host_index = 0;
+		hrq->hba_index = 0;
+		hrq->entry_repost = LPFC_RQ_REPOST;
+
+		drq->db_format = LPFC_DB_RING_FORMAT;
+		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+		drq->type = LPFC_DRQ;
+		drq->assoc_qid = cq->queue_id;
+		drq->subtype = subtype;
+		drq->host_index = 0;
+		drq->hba_index = 0;
+		drq->entry_repost = LPFC_RQ_REPOST;
+
+		list_add_tail(&hrq->list, &cq->child_list);
+		list_add_tail(&drq->list, &cq->child_list);
+	}
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3120 RQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (rc == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+
+	/* Initialize all RQs with associated queue id */
+	for (idx = 0; idx < numrq; idx++) {
+		hrq = hrqp[idx];
+		hrq->queue_id = rc + (2 * idx);
+		drq = drqp[idx];
+		drq->queue_id = rc + (2 * idx) + 1;
+	}
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	return status;
+}
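+
+/*
+ * Worked example of the id assignment above (illustrative values): the
+ * port returns one base id for the whole set and header/data queues
+ * alternate, so a base id of 20 with numrq = 2 yields header RQs 20 and
+ * 22 and data RQs 21 and 23.
+ */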
+
+/**
+ * lpfc_eq_destroy - Destroy an event Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue lives on.
+ * @eq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @eq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @eq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!eq)
+		return -ENODEV;
+	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_eq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_EQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
+	       eq->queue_id);
+	mbox->vport = eq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2505 EQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+
+	/* Remove eq from any list */
+	list_del_init(&eq->list);
+	mempool_free(mbox, eq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue lives on.
+ * @cq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @cq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @cq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!cq)
+		return -ENODEV;
+	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_cq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_CQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
+	       cq->queue_id);
+	mbox->vport = cq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2506 CQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove cq from any list */
+	list_del_init(&cq->list);
+	mempool_free(mbox, cq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue lives on.
+ * @mq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!mq)
+		return -ENODEV;
+	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+	       mq->queue_id);
+	mbox->vport = mq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2507 MQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove mq from any list */
+	list_del_init(&mq->list);
+	mempool_free(mbox, mq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue lives on.
+ * @wq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @wq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @wq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!wq)
+		return -ENODEV;
+	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
+	       wq->queue_id);
+	mbox->vport = wq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2508 WQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove wq from any list */
+	list_del_init(&wq->list);
+	kfree(wq->pring);
+	wq->pring = NULL;
+	mempool_free(mbox, wq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
+ * @phba: HBA structure that indicates the port the queues live on.
+ * @hrq: The queue structure associated with the header receive queue.
+ * @drq: The queue structure associated with the data receive queue.
+ *
+ * This function destroys a receive queue pair, as detailed in @hrq and
+ * @drq, by sending mailbox commands, specific to the type of queue, to the
+ * HBA.
+ *
+ * The @hrq and @drq structs are used to get the queue IDs of the queues to
+ * destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+int
+lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+		struct lpfc_queue *drq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!hrq || !drq)
+		return -ENODEV;
+	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+	       hrq->queue_id);
+	mbox->vport = hrq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2509 RQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, hrq->phba->mbox_mem_pool);
+		return -ENXIO;
+	}
+	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+	       drq->queue_id);
+	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2510 RQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	list_del_init(&hrq->list);
+	list_del_init(&drq->list);
+	mempool_free(mbox, hrq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pdma_phys_addr0: Physical address of the 1st SGL page.
+ * @pdma_phys_addr1: Physical address of the 2nd SGL page.
+ * @xritag: the xritag that ties this io to the SGL pages.
+ *
+ * This routine will post the sgl pages for the IO that has the xritag
+ * that is in the iocbq structure. The xritag is assigned during iocbq
+ * creation and persists for as long as the driver is loaded.
+ * If the caller has fewer than 256 scatter gather segments to map then
+ * pdma_phys_addr1 should be 0.
+ * If the caller needs to map more than 256 scatter gather segments then
+ * pdma_phys_addr1 should be a valid physical address.
+ * Physical addresses for SGLs must be 64 byte aligned.
+ * If you are going to map 2 SGLs then the first one must have 256 entries
+ * and the second SGL can have between 1 and 256 entries.
+ *
+ * Return codes:
+ * 	0 - Success
+ * 	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+		dma_addr_t pdma_phys_addr0,
+		dma_addr_t pdma_phys_addr1,
+		uint16_t xritag)
+{
+	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t mbox_tmo;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (xritag == NO_XRI) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0364 Invalid param:\n");
+		return -EINVAL;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+			sizeof(struct lpfc_mbx_post_sgl_pages) -
+			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+				&mbox->u.mqe.un.post_sgl_pages;
+	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
+				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
+				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2511 POST_SGL mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
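+/*
+ * Example (hypothetical caller, for illustration only): posting a
+ * single-page SGL passes 0 for the second page,
+ *
+ *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
+ *
+ * while a two-page SGL passes the 64 byte aligned physical address of
+ * the second page as pdma_phys_addr1.
+ */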
+
+/**
+ * lpfc_sli4_alloc_xri - Get an available xri in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next available xri from the
+ * driver's xri bitmask. Because the index is logical, the search
+ * starts at 0 each time.
+ *
+ * Returns
+ *	An xri in the range 0 <= xri < max_xri if successful
+ *	NO_XRI if no xris are available.
+ **/
+static uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+	unsigned long xri;
+
+	/*
+	 * Fetch the next logical xri.  Because this index is logical,
+	 * the driver starts at 0 each time.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+				 phba->sli4_hba.max_cfg_param.max_xri, 0);
+	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+		spin_unlock_irq(&phba->hbalock);
+		return NO_XRI;
+	} else {
+		set_bit(xri, phba->sli4_hba.xri_bmask);
+		phba->sli4_hba.max_cfg_param.xri_used++;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return xri;
+}
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. The hbalock must be held.
+ **/
+static void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask))
+		phba->sli4_hba.max_cfg_param.xri_used--;
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_sli4_free_xri(phba, xri);
+	spin_unlock_irq(&phba->hbalock);
+}
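+/*
+ * Usage sketch (illustrative, not a caller in this file): each successful
+ * lpfc_sli4_alloc_xri() must eventually be balanced by lpfc_sli4_free_xri()
+ * so the xri bitmask and the xri_used count stay consistent:
+ *
+ *	xri = lpfc_sli4_alloc_xri(phba);
+ *	if (xri == NO_XRI)
+ *		return -ENOMEM;
+ *	...
+ *	lpfc_sli4_free_xri(phba, xri);
+ */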
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. If there is no unused xritag
+ * it will return NO_XRI (0xffff); otherwise it returns the newly
+ * allocated xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+	uint16_t xri_index;
+
+	xri_index = lpfc_sli4_alloc_xri(phba);
+	if (xri_index == NO_XRI)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2004 Failed to allocate XRI.last XRITAG is %d"
+				" Max XRI is %d, Used XRI is %d\n",
+				xri_index,
+				phba->sli4_hba.max_cfg_param.max_xri,
+				phba->sli4_hba.max_cfg_param.xri_used);
+	return xri_index;
+}
+
+/**
+ * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @post_sgl_list: pointer to els sgl entry list.
+ * @post_cnt: number of els sgl entries on the list.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+static int
+lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
+			    struct list_head *post_sgl_list,
+			    int post_cnt)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2559 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
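+	/*
+	 * Sizing note: reqlen covers one sgl_page_pairs descriptor per
+	 * posted sgl (two 64-bit page addresses split into lo/hi words),
+	 * plus the config subheader and the leading word that carries the
+	 * starting xri and the xri count; the whole request must fit
+	 * within a single SLI4 page, as checked above.
+	 */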
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+			 LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0285 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	viraddr = mbox->sge_array->addr[0];
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	pg_pairs = 0;
+	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(0));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(0));
+
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = sglq_entry->sli4_xritag;
+		sgl_pg_pairs++;
+		pg_pairs++;
+	}
+
+	/* Complete initialization and perform endian conversion. */
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2513 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @count: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @count scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
+			      struct list_head *sblist,
+			      int count)
+{
+	struct lpfc_scsi_buf *psb;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	dma_addr_t pdma_phys_bpl1;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* Calculate the requested length of the dma memory */
+	reqlen = count * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0217 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0283 Failed to allocate mbox cmd memory\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+				LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2561 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory */
+	viraddr = mbox->sge_array->addr[0];
+
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	pg_pairs = 0;
+	list_for_each_entry(psb, sblist, list) {
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = psb->cur_iocbq.sli4_xritag;
+		sgl_pg_pairs++;
+		pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	/* Perform endian conversion if necessary */
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2564 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
+ * @phba: pointer to lpfc_hba struct that the frame was received on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function checks the fields in the @fc_hdr to see if the FC frame is a
+ * valid type of frame that the LPFC driver will handle. This function will
+ * return a zero if the frame is a valid frame or a non zero value when the
+ * frame does not pass the check.
+ **/
+static int
+lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
+{
+	struct fc_vft_header *fc_vft_hdr;
+	uint32_t *header = (uint32_t *) fc_hdr;
+
+#define FC_RCTL_MDS_DIAGS	0xF4
+
+	switch (fc_hdr->fh_r_ctl) {
+	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
+	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
+	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
+	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
+	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
+	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
+	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
+	case FC_RCTL_DD_CMD_STATUS:	/* command status */
+	case FC_RCTL_ELS_REQ:	/* extended link services request */
+	case FC_RCTL_ELS_REP:	/* extended link services reply */
+	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
+	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
+	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
+	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
+	case FC_RCTL_BA_RMC: 	/* remove connection */
+	case FC_RCTL_BA_ACC:	/* basic accept */
+	case FC_RCTL_BA_RJT:	/* basic reject */
+	case FC_RCTL_BA_PRMT:
+	case FC_RCTL_ACK_1:	/* acknowledge_1 */
+	case FC_RCTL_ACK_0:	/* acknowledge_0 */
+	case FC_RCTL_P_RJT:	/* port reject */
+	case FC_RCTL_F_RJT:	/* fabric reject */
+	case FC_RCTL_P_BSY:	/* port busy */
+	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
+	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
+	case FC_RCTL_LCR:	/* link credit reset */
+	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
+	case FC_RCTL_END:	/* end */
+		break;
+	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
+		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
+		return lpfc_fc_frame_check(phba, fc_hdr);
+	default:
+		goto drop;
+	}
+
+#define FC_TYPE_VENDOR_UNIQUE	0xFF
+
+	switch (fc_hdr->fh_type) {
+	case FC_TYPE_BLS:
+	case FC_TYPE_ELS:
+	case FC_TYPE_FCP:
+	case FC_TYPE_CT:
+	case FC_TYPE_NVME:
+	case FC_TYPE_VENDOR_UNIQUE:
+		break;
+	case FC_TYPE_IP:
+	case FC_TYPE_ILS:
+	default:
+		goto drop;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"2538 Received frame rctl:x%x, type:x%x, "
+			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
+			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+			be32_to_cpu(header[6]));
+	return 0;
+drop:
+	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+			"2539 Dropped frame rctl:x%x type:x%x\n",
+			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+	return 1;
+}
+
+/**
+ * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function processes the FC header to retrieve the VFI from the VF
+ * header, if one exists. This function will return the VFI if one exists
+ * or 0 if no VFT header exists.
+ **/
+static uint32_t
+lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
+{
+	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+
+	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
+		return 0;
+	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
+}
+
+/**
+ * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
+ * @phba: Pointer to the HBA structure to search for the vport on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ * @fcfi: The FCF index that the frame came from
+ * @did: The destination ID (D_ID) extracted from the frame
+ *
+ * This function searches the @phba for a vport that matches the content of the
+ * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
+ * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
+ * returns the matching vport pointer or NULL if unable to match frame to a
+ * vport.
+ **/
+static struct lpfc_vport *
+lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
+		       uint16_t fcfi, uint32_t did)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_vport *vport = NULL;
+	int i;
+
+	if (did == Fabric_DID)
+		return phba->pport;
+	if ((phba->pport->fc_flag & FC_PT2PT) &&
+		!(phba->link_state == LPFC_HBA_READY))
+		return phba->pport;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			if (phba->fcf.fcfi == fcfi &&
+			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
+			    vports[i]->fc_myDID == did) {
+				vport = vports[i];
+				break;
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+	return vport;
+}
+
+/**
+ * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
+ * @vport: The vport to work on.
+ *
+ * This function updates the receive sequence time stamp for this vport. The
+ * receive sequence time stamp indicates the time that the last frame of the
+ * sequence that has been idle for the longest amount of time was received.
+ * The driver uses this time stamp to indicate if any received sequences have
+ * timed out.
+ **/
+static void
+lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* get the oldest sequence on the rcv list */
+	h_buf = list_get_first(&vport->rcv_buffer_list,
+			       struct lpfc_dmabuf, list);
+	if (!h_buf)
+		return;
+	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
+}
+
+/**
+ * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function cleans up all outstanding received sequences. This is called
+ * by the driver when a link event or user action invalidates all the received
+ * sequences.
+ **/
+void
+lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+}
+
+/**
+ * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function determines whether any received sequences have timed out by
+ * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
+ * indicates that there is at least one timed out sequence this routine will
+ * go through the received sequences one at a time from most inactive to most
+ * active to determine which ones need to be cleaned up. Once it has determined
+ * that a sequence needs to be cleaned up it will simply free up the resources
+ * without sending an abort.
+ **/
+void
+lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+	unsigned long timeout;
+	int abort_count = 0;
+
+	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+		   vport->rcv_buffer_time_stamp);
+	if (list_empty(&vport->rcv_buffer_list) ||
+	    time_before(jiffies, timeout))
+		return;
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+			   dmabuf->time_stamp);
+		if (time_before(jiffies, timeout))
+			break;
+		abort_count++;
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+	if (abort_count)
+		lpfc_update_rcv_time_stamp(vport);
+}
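+/*
+ * Worked example for the E_D_TOV reaping above (illustrative numbers): with
+ * an fc_edtov of 2000 ms, a sequence whose newest frame arrived at jiffies
+ * time T is reaped once jiffies passes T + msecs_to_jiffies(2000). Because
+ * rcv_buffer_list is kept ordered oldest-first, the walk stops at the first
+ * sequence that has not yet timed out.
+ */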
+
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @vport: The vport the frame was received on.
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's
+ * rcv_buffer_list. This function returns a pointer to the first dmabuf in
+ * the sequence list that the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+	struct hbq_dmabuf *temp_dmabuf = NULL;
+	uint8_t	found = 0;
+
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	dmabuf->time_stamp = jiffies;
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+
+	/* Use the hdr_buf to find the sequence that this frame belongs to */
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+	if (!seq_dmabuf) {
+		/*
+		 * This indicates first frame received for this sequence.
+		 * Queue the buffer on the vport's rcv_buffer_list.
+		 */
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		lpfc_update_rcv_time_stamp(vport);
+		return dmabuf;
+	}
+	temp_hdr = seq_dmabuf->hbuf.virt;
+	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+		list_del_init(&seq_dmabuf->hbuf.list);
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+		lpfc_update_rcv_time_stamp(vport);
+		return dmabuf;
+	}
+	/* move this sequence to the tail to indicate a young sequence */
+	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
+	seq_dmabuf->time_stamp = jiffies;
+	lpfc_update_rcv_time_stamp(vport);
+	if (list_empty(&seq_dmabuf->dbuf.list)) {
+		temp_hdr = dmabuf->hbuf.virt;
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+		return seq_dmabuf;
+	}
+	/* find the correct place in the sequence to insert this frame */
+	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
+	while (!found) {
+		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+		/*
+		 * If the frame's sequence count is greater than the frame on
+		 * the list then insert the frame right after this frame
+		 */
+		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+			found = 1;
+			break;
+		}
+
+		if (&d_buf->list == &seq_dmabuf->dbuf.list)
+			break;
+		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
+	}
+
+	if (found)
+		return seq_dmabuf;
+	return NULL;
+}
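+/*
+ * Ordering sketch for lpfc_fc_frame_add() (illustrative): the dbuf list is
+ * kept sorted by ascending SEQ_CNT by walking backwards from the tail. If
+ * frames 0, 1 and 3 of a sequence are already queued and frame 2 arrives,
+ * the backward walk stops at frame 1 and links frame 2 after it, yielding
+ * 0, 1, 2, 3.
+ */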
+
+/**
+ * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the partially assembled sequence described
+ * by the information from the basic abort @dmabuf. It checks whether such a
+ * partially assembled sequence is held by the driver. If so, it frees up all
+ * the frames from the partially assembled sequence.
+ *
+ * Return
+ * true  -- if there is a matching partially assembled sequence present and
+ *          all the frames were freed with the sequence;
+ * false -- if there is no matching partially assembled sequence present so
+ *          nothing got aborted in the lower layer driver
+ **/
+static bool
+lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
+			    struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+
+	/* Use the hdr_buf to find the sequence that matches this frame */
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	INIT_LIST_HEAD(&dmabuf->hbuf.list);
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+
+	/* Free up all the frames from the partially assembled sequence */
+	if (seq_dmabuf) {
+		list_for_each_entry_safe(d_buf, n_buf,
+					 &seq_dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		return true;
+	}
+	return false;
+}
+
+/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the assembled sequence that was passed to the
+ * upper level protocol, described by the information from the basic abort
+ * @dmabuf. It checks whether such a pending context exists at the upper
+ * level protocol. If so, it cleans up the pending context.
+ *
+ * Return
+ * true  -- if there is matching pending context of the sequence cleaned
+ *          at ulp;
+ * false -- if there is no matching pending context of the sequence present
+ *          at ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+	struct lpfc_hba *phba = vport->phba;
+	int handled;
+
+	/* Accepting abort at ulp with SLI4 only */
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return false;
+
+	/* Give all interested upper level protocols a chance to handle the abort */
+	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+	if (handled)
+		return true;
+
+	return false;
+}
+
+/**
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
+ * @phba: Pointer to HBA context object.
+ * @cmd_iocbq: pointer to the command iocbq structure.
+ * @rsp_iocbq: pointer to the response iocbq structure.
+ *
+ * This function handles the sequence abort response iocb command complete
+ * event. It properly releases the memory allocated to the sequence abort
+ * accept iocb.
+ **/
+static void
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
+			     struct lpfc_iocbq *cmd_iocbq,
+			     struct lpfc_iocbq *rsp_iocbq)
+{
+	struct lpfc_nodelist *ndlp;
+
+	if (cmd_iocbq) {
+		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+		lpfc_nlp_put(ndlp);
+		lpfc_nlp_not_used(ndlp);
+		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+	}
+
+	/* Failure means BLS ABORT RSP did not get delivered to remote node */
+	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
+			rsp_iocbq->iocb.ulpStatus,
+			rsp_iocbq->iocb.un.ulpWord[4]);
+}
+
+/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps to the known range of XRIs
+ * allocated and used by the driver.
+ **/
+uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+		      uint16_t xri)
+{
+	uint16_t i;
+
+	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+		if (xri == phba->sli4_hba.xri_ids[i])
+			return i;
+	}
+	return NO_XRI;
+}
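+/*
+ * Note: the lookup above is a linear scan of the physical-to-logical xri
+ * map, O(max_xri); it is only exercised on the unsolicited abort path.
+ */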
+
+/**
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
+ * @vport: Pointer to the vport on which the sequence was received.
+ * @fc_hdr: pointer to a FC frame header.
+ * @aborted: whether the partially assembled receive sequence was aborted.
+ *
+ * This function sends a basic response to a previous unsol sequence abort
+ * event after aborting the sequence handling.
+ **/
+void
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+			struct fc_frame_header *fc_hdr, bool aborted)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *ctiocb = NULL;
+	struct lpfc_nodelist *ndlp;
+	uint16_t oxid, rxid, xri, lxri;
+	uint32_t sid, fctl;
+	IOCB_t *icmd;
+	int rc;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
+
+	ndlp = lpfc_findnode_did(vport, sid);
+	if (!ndlp) {
+		ndlp = lpfc_nlp_init(vport, sid);
+		if (!ndlp) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+					 "1268 Failed to allocate ndlp for "
+					 "oxid:x%x SID:x%x\n", oxid, sid);
+			return;
+		}
+		/* Put ndlp onto pport node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+					 "3275 Failed to active ndlp found "
+					 "for oxid:x%x SID:x%x\n", oxid, sid);
+			return;
+		}
+	}
+
+	/* Allocate buffer for rsp iocb */
+	ctiocb = lpfc_sli_get_iocbq(phba);
+	if (!ctiocb)
+		return;
+
+	/* Extract the F_CTL field from FC_HDR */
+	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
+
+	icmd = &ctiocb->iocb;
+	icmd->un.xseq64.bdl.bdeSize = 0;
+	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
+	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
+
+	/* Fill in the rest of iocb fields */
+	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
+	icmd->ulpBdeCount = 0;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+	ctiocb->context1 = lpfc_nlp_get(ndlp);
+
+	ctiocb->vport = phba->pport;
+	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+	ctiocb->sli4_lxritag = NO_XRI;
+	ctiocb->sli4_xritag = NO_XRI;
+
+	if (fctl & FC_FC_EX_CTX)
+		/* Exchange responder sent the abort so we
+		 * own the oxid.
+		 */
+		xri = oxid;
+	else
+		xri = rxid;
+	lxri = lpfc_sli4_xri_inrange(phba, xri);
+	if (lxri != NO_XRI)
+		lpfc_set_rrq_active(phba, ndlp, lxri,
+			(xri == oxid) ? rxid : oxid, 0);
+	/* For BA_ABTS from exchange responder, if the logical xri with
+	 * the oxid maps to the FCP XRI range, the port no longer has
+	 * that exchange context, send a BLS_RJT. Override the IOCB for
+	 * a BA_RJT.
+	 */
+	if ((fctl & FC_FC_EX_CTX) &&
+	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
+		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+	}
+
+	/* If BA_ABTS failed to abort a partially assembled receive sequence,
+	 * the driver no longer has that exchange, send a BLS_RJT. Override
+	 * the IOCB for a BA_RJT.
+	 */
+	if (!aborted) {
+		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+	}
+
+	if (fctl & FC_FC_EX_CTX) {
+		/* ABTS sent by responder to CT exchange, construction
+		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
+		 * field and RX_ID from ABTS for RX_ID field.
+		 */
+		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+	} else {
+		/* ABTS sent by initiator to CT exchange, construction
+		 * of BA_ACC will need to allocate a new XRI as for the
+		 * XRI_TAG field.
+		 */
+		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+	}
+	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
+	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+	/* Xmit CT abts response on exchange <xid> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2925 Failed to issue CT ABTS RSP x%x on "
+				 "xri x%x, Data x%x\n",
+				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+				 phba->link_state);
+		lpfc_nlp_put(ndlp);
+		ctiocb->context1 = NULL;
+		lpfc_sli_release_iocbq(phba, ctiocb);
+	}
+}
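+/*
+ * Summary of the BA_ACC/BA_RJT selection above: the response defaults to
+ * BA_ACC and is overridden to BA_RJT either when a responder-sent ABTS
+ * names an oxid outside the driver's IOCB range, or when the partially
+ * assembled sequence could not be aborted (aborted is false).
+ */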
+
+/**
+ * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an SLI-4 unsolicited abort event. If the unsolicited
+ * receive sequence is only partially assembled by the driver, it shall abort
+ * the partially assembled frames for the sequence. Otherwise, if the
+ * unsolicited receive sequence has been completely assembled and passed to
+ * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
+ * indicate that the unsolicited sequence has been aborted. After that, it
+ * will issue a basic accept to accept the abort.
+ **/
+static void
+lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
+			     struct hbq_dmabuf *dmabuf)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_frame_header fc_hdr;
+	uint32_t fctl;
+	bool aborted;
+
+	/* Make a copy of fc_hdr before the dmabuf being released */
+	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
+
+	if (fctl & FC_FC_EX_CTX) {
+		/* ABTS by responder to exchange, no cleanup needed */
+		aborted = true;
+	} else {
+		/* ABTS by initiator to exchange, need to do cleanup */
+		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+		if (!aborted)
+			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
+	}
+	lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+	if (phba->nvmet_support) {
+		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
+		return;
+	}
+
+	/* Respond with BA_ACC or BA_RJT accordingly */
+	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
+}
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * The frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for three major things. 1) That the first
+ * frame has a sequence count of zero. 2) That there is a frame with the last
+ * frame of sequence bit set. 3) That there are no holes in the sequence count.
+ * The function will return 1 when the sequence is complete, otherwise it will
+ * return 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *seq_dmabuf;
+	uint32_t fctl;
+	int seq_count = 0;
+
+	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* make sure the first frame of the sequence has a sequence count of zero */
+	if (hdr->fh_seq_cnt != seq_count)
+		return 0;
+	fctl = (hdr->fh_f_ctl[0] << 16 |
+		hdr->fh_f_ctl[1] << 8 |
+		hdr->fh_f_ctl[2]);
+	/* If last frame of sequence we can return success. */
+	if (fctl & FC_FC_END_SEQ)
+		return 1;
+	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+		/* If there is a hole in the sequence count then fail. */
+		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
+			return 0;
+		fctl = (hdr->fh_f_ctl[0] << 16 |
+			hdr->fh_f_ctl[1] << 8 |
+			hdr->fh_f_ctl[2]);
+		/* If last frame of sequence we can return success. */
+		if (fctl & FC_FC_END_SEQ)
+			return 1;
+	}
+	return 0;
+}
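+/*
+ * Completeness sketch for lpfc_seq_complete() (illustrative): frames with
+ * SEQ_CNT 0, 1, 2 where frame 2 carries FC_FC_END_SEQ form a complete
+ * sequence; the same frames without the END_SEQ bit, or frames with
+ * SEQ_CNT 0, 1, 3, do not.
+ */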
+
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list will be
+ * used to issue to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it throws out the received frames that could not
+ * be described and returns a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+	struct hbq_dmabuf *hbq_buf;
+	struct lpfc_dmabuf *d_buf, *n_buf;
+	struct lpfc_iocbq *first_iocbq, *iocbq;
+	struct fc_frame_header *fc_hdr;
+	uint32_t sid;
+	uint32_t len, tot_len;
+	struct ulp_bde64 *pbde;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	/* remove from receive buffer list */
+	list_del_init(&seq_dmabuf->hbuf.list);
+	lpfc_update_rcv_time_stamp(vport);
+	/* get the Remote Port's SID */
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	tot_len = 0;
+	/* Get an iocbq struct to fill in. */
+	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+	if (first_iocbq) {
+		/* Initialize the first IOCB. */
+		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
+		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+		first_iocbq->vport = vport;
+
+		/* Check FC Header to see what TYPE of frame we are rcv'ing */
+		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
+			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
+			first_iocbq->iocb.un.rcvels.parmRo =
+				sli4_did_from_fc_hdr(fc_hdr);
+			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
+		} else
+			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+		first_iocbq->iocb.ulpContext = NO_XRI;
+		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
+			be16_to_cpu(fc_hdr->fh_ox_id);
+		/* iocbq is prepped for internal consumption.  Physical vpi. */
+		first_iocbq->iocb.unsli3.rcvsli3.vpi =
+			vport->phba->vpi_ids[vport->vpi];
+		/* put the first buffer into the first IOCBq */
+		tot_len = bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+		first_iocbq->context2 = &seq_dmabuf->dbuf;
+		first_iocbq->context3 = NULL;
+		first_iocbq->iocb.ulpBdeCount = 1;
+		if (tot_len > LPFC_DATA_BUF_SIZE)
+			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+		else
+			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
+		first_iocbq->iocb.un.rcvels.remoteID = sid;
+
+		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+	}
+	iocbq = first_iocbq;
+	/*
+	 * Each IOCBq can have two Buffers assigned, so go through the list
+	 * of buffers for this sequence and save two buffers in each IOCBq
+	 */
+	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+		if (!iocbq) {
+			lpfc_in_buf_free(vport->phba, d_buf);
+			continue;
+		}
+		if (!iocbq->context3) {
+			iocbq->context3 = d_buf;
+			iocbq->iocb.ulpBdeCount++;
+			/* We need to get the size out of the right CQE */
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			len = bf_get(lpfc_rcqe_length,
+				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
+			pbde = (struct ulp_bde64 *)
+					&iocbq->iocb.unsli3.sli3Words[4];
+			if (len > LPFC_DATA_BUF_SIZE)
+				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+			else
+				pbde->tus.f.bdeSize = len;
+
+			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+			tot_len += len;
+		} else {
+			iocbq = lpfc_sli_get_iocbq(vport->phba);
+			if (!iocbq) {
+				if (first_iocbq) {
+					first_iocbq->iocb.ulpStatus =
+							IOSTAT_FCP_RSP_ERROR;
+					first_iocbq->iocb.un.ulpWord[4] =
+							IOERR_NO_RESOURCES;
+				}
+				lpfc_in_buf_free(vport->phba, d_buf);
+				continue;
+			}
+			/* We need to get the size out of the right CQE */
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			len = bf_get(lpfc_rcqe_length,
+				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
+			iocbq->context2 = d_buf;
+			iocbq->context3 = NULL;
+			iocbq->iocb.ulpBdeCount = 1;
+			if (len > LPFC_DATA_BUF_SIZE)
+				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+			else
+				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
+
+			tot_len += len;
+			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+
+			iocbq->iocb.un.rcvels.remoteID = sid;
+			list_add_tail(&iocbq->list, &first_iocbq->list);
+		}
+	}
+	/* Free the sequence's header buffer */
+	if (!first_iocbq)
+		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
+
+	return first_iocbq;
+}
+
+static void
+lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
+			  struct hbq_dmabuf *seq_dmabuf)
+{
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
+	struct lpfc_hba *phba = vport->phba;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+	if (!iocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2707 Ring %d handler: Failed to allocate "
+				"iocb Rctl x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+		return;
+	}
+	if (!lpfc_complete_unsol_iocb(phba,
+				      phba->sli4_hba.els_wq->pring,
+				      iocbq, fc_hdr->fh_r_ctl,
+				      fc_hdr->fh_type))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2540 Ring %d handler: unexpected Rctl "
+				"x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+
+	/* Free iocb created in lpfc_prep_seq */
+	list_for_each_entry_safe(curr_iocb, next_iocb,
+		&iocbq->list, list) {
+		list_del_init(&curr_iocb->list);
+		lpfc_sli_release_iocbq(phba, curr_iocb);
+	}
+	lpfc_sli_release_iocbq(phba, iocbq);
+}
+
+static void
+lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+
+	if (pcmd && pcmd->virt)
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+	kfree(pcmd);
+	lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+static void
+lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
+			      struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *iocbq = NULL;
+	union  lpfc_wqe *wqe;
+	struct lpfc_dmabuf *pcmd = NULL;
+	uint32_t frame_len;
+	int rc;
+
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
+
+	/* Send the received frame back */
+	iocbq = lpfc_sli_get_iocbq(phba);
+	if (!iocbq)
+		goto exit;
+
+	/* Allocate buffer for command payload */
+	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (pcmd)
+		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+					    &pcmd->phys);
+	if (!pcmd || !pcmd->virt)
+		goto exit;
+
+	INIT_LIST_HEAD(&pcmd->list);
+
+	/* copyin the payload */
+	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
+
+	/* fill in BDE's for command */
+	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
+	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
+	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
+
+	iocbq->context2 = pcmd;
+	iocbq->vport = vport;
+	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
+	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+	/*
+	 * Setup rest of the iocb as though it were a WQE
+	 * Build the SEND_FRAME WQE
+	 */
+	wqe = (union lpfc_wqe *)&iocbq->iocb;
+
+	wqe->send_frame.frame_len = frame_len;
+	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
+	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
+	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
+	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
+	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
+	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
+
+	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
+	iocbq->iocb.ulpLe = 1;
+	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
+	if (rc == IOCB_ERROR)
+		goto exit;
+
+	lpfc_in_buf_free(phba, &dmabuf->dbuf);
+	return;
+
+exit:
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"2023 Unable to process MDS loopback frame\n");
+	if (pcmd && pcmd->virt)
+		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
+	kfree(pcmd);
+	if (iocbq)
+		lpfc_sli_release_iocbq(phba, iocbq);
+	lpfc_in_buf_free(phba, &dmabuf->dbuf);
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the hbq_dmabuf holding the received frame.
+ *
+ * This function is called with no lock held. This function processes all
+ * the received buffers and gives it to upper layers when a received buffer
+ * indicates that it is the final frame in the sequence. The interrupt
+ * service routine processes received buffers at interrupt contexts.
+ * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
+ * appropriate receive function when the final frame in a sequence is received.
+ **/
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+				 struct hbq_dmabuf *dmabuf)
+{
+	struct hbq_dmabuf *seq_dmabuf;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_vport *vport;
+	uint32_t fcfi;
+	uint32_t did;
+
+	/* Process each received buffer */
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+
+	/* check to see if this a valid type of frame */
+	if (lpfc_fc_frame_check(phba, fc_hdr)) {
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+
+	if (bf_get(lpfc_cqe_code,
+		   &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
+		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
+			      &dmabuf->cq_event.cqe.rcqe_cmpl);
+	else
+		fcfi = bf_get(lpfc_rcqe_fcf_id,
+			      &dmabuf->cq_event.cqe.rcqe_cmpl);
+
+	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS &&
+	    fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) {
+		vport = phba->pport;
+		/* Handle MDS Loopback frames */
+		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+		return;
+	}
+
+	/* d_id this frame is directed to */
+	did = sli4_did_from_fc_hdr(fc_hdr);
+
+	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
+	if (!vport) {
+		/* throw out the frame */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+
+	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
+	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
+		(did != Fabric_DID)) {
+		/*
+		 * Throw out the frame if we are not pt2pt.
+		 * The pt2pt protocol allows for discovery frames
+		 * to be received without a registered VPI.
+		 */
+		if (!(vport->fc_flag & FC_PT2PT) ||
+			(phba->link_state == LPFC_HBA_READY)) {
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+			return;
+		}
+	}
+
+	/* Handle the basic abort sequence (BA_ABTS) event */
+	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
+		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
+		return;
+	}
+
+	/* Link this frame */
+	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+	if (!seq_dmabuf) {
+		/* unable to add frame to vport - throw it out */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* If not last frame in sequence continue processing frames. */
+	if (!lpfc_seq_complete(seq_dmabuf))
+		return;
+
+	/* Send the complete sequence to the upper layer protocol */
+	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
+}
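+/*
+ * For reference, the dispatch order implemented above: frame sanity check,
+ * FCF index extraction, MDS diagnostic loopback, vport lookup,
+ * unregistered-VPI filtering (pt2pt excepted), BA_ABTS handling, sequence
+ * reassembly, and finally delivery of a completed sequence to the ULP.
+ */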
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec.  This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * This routine does not require any locks.  Its usage is expected
+ * to be at driver load or reset recovery, when driver execution is
+ * sequential.
+ *
+ * Return codes
+ * 	0 - successful
+ *      -EIO - The mailbox failed to complete successfully.
+ * 	When this error occurs, the driver is not guaranteed
+ *	to have any rpi regions posted to the device and
+ *	must either attempt to repost the regions or take a
+ *	fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+	struct lpfc_rpi_hdr *rpi_page;
+	uint32_t rc = 0;
+	uint16_t lrpi = 0;
+
+	/* SLI4 ports that support extents do not require RPI headers. */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		goto exit;
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
+
+	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+		/*
+		 * Assign the rpi headers a physical rpi only if the driver
+		 * has not initialized those resources.  A port reset only
+		 * needs the headers posted.
+		 */
+		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+		    LPFC_RPI_RSRC_RDY)
+			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
+		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2008 Error %d posting all rpi "
+					"headers\n", rc);
+			rc = -EIO;
+			break;
+		}
+	}
+
+ exit:
+	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+	       LPFC_RPI_RSRC_RDY);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page:  pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec.  This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* SLI4 ports that support extents do not require RPI headers. */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return rc;
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
+
+	/* The port is notified of the header region via a mailbox command. */
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2001 Unable to allocate memory for issuing "
+				"SLI_CONFIG_SPECIAL mailbox command\n");
+		return -ENOMEM;
+	}
+
+	/* Post all rpi memory regions to the port. */
+	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+			 sizeof(struct lpfc_sli4_cfg_mhdr),
+			 LPFC_SLI4_MBX_EMBED);
+
+	/* Post the physical rpi to the port for this rpi header. */
+	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+	       rpi_page->start_rpi);
+	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+	       hdr_tmpl, rpi_page->page_count);
+
+	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2514 POST_RPI_HDR mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	} else {
+		/*
+		 * The next_rpi stores the next logical module-64 rpi value used
+		 * to post physical rpis in subsequent rpi postings.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next available rpi from the
+ * driver's rpi bitmask. When rpi resources run low, it also posts
+ * another rpi header page so that subsequent allocations can succeed.
+ *
+ * Returns
+ * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+	unsigned long rpi;
+	uint16_t max_rpi, rpi_limit;
+	uint16_t rpi_remaining, lrpi = 0;
+	struct lpfc_rpi_hdr *rpi_hdr;
+	unsigned long iflag;
+
+	/*
+	 * Fetch the next logical rpi.  Because this index is logical,
+	 * the  driver starts at 0 each time.
+	 */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	rpi_limit = phba->sli4_hba.next_rpi;
+
+	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+	if (rpi >= rpi_limit)
+		rpi = LPFC_RPI_ALLOC_ERROR;
+	else {
+		set_bit(rpi, phba->sli4_hba.rpi_bmask);
+		phba->sli4_hba.max_cfg_param.rpi_used++;
+		phba->sli4_hba.rpi_count++;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0001 rpi:%x max:%x lim:%x\n",
+			(int) rpi, max_rpi, rpi_limit);
+
+	/*
+	 * Don't try to allocate more rpi header regions if the device limit
+	 * has been exhausted.
+	 */
+	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+	    (phba->sli4_hba.rpi_count >= max_rpi)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return rpi;
+	}
+
+	/*
+	 * RPI header postings are not required for SLI4 ports capable of
+	 * extents.
+	 */
+	if (!phba->sli4_hba.rpi_hdrs_in_use) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return rpi;
+	}
+
+	/*
+	 * If the driver is running low on rpi resources, allocate another
+	 * page now.  Note that the next_rpi value is used because
+	 * it represents how many are actually in use whereas max_rpi notes
+	 * how many are supported max by the device.
+	 */
+	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+		if (!rpi_hdr) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2002 Error Could not grow rpi "
+					"count\n");
+		} else {
+			lrpi = rpi_hdr->start_rpi;
+			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+		}
+	}
+
+	return rpi;
+}
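+/*
+ * Usage sketch (illustrative, not a caller in this file): an allocated rpi
+ * must be checked against LPFC_RPI_ALLOC_ERROR before use and released with
+ * lpfc_sli4_free_rpi() when the login is torn down:
+ *
+ *	rpi = lpfc_sli4_alloc_rpi(phba);
+ *	if (rpi == LPFC_RPI_ALLOC_ERROR)
+ *		return -ENOMEM;
+ *	...
+ *	lpfc_sli4_free_rpi(phba, rpi);
+ */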
+
+/**
+ * __lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to release.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver. The hbalock must be held.
+ **/
+static void
+__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+	/*
+	 * if the rpi value indicates a prior unreg has already
+	 * been done, skip the unreg.
+	 */
+	if (rpi == LPFC_RPI_ALLOC_ERROR)
+		return;
+
+	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
+		phba->sli4_hba.rpi_count--;
+		phba->sli4_hba.max_cfg_param.rpi_used--;
+	}
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to release.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_sli4_free_rpi(phba, rpi);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the memory region that
+ * provided rpi via a bitmask.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+	kfree(phba->sli4_hba.rpi_bmask);
+	kfree(phba->sli4_hba.rpi_ids);
+	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Resume an rpi with the port
+ * @ndlp: pointer to the node whose rpi is being resumed.
+ * @cmpl: optional mailbox completion handler.
+ * @arg: argument to pass to the completion handler.
+ *
+ * This routine is invoked to issue a RESUME_RPI mailbox command to
+ * the port for the rpi associated with @ndlp.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_hba *phba = ndlp->phba;
+	int rc;
+
+	/* The port is notified of the header region via a mailbox command. */
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* Post all rpi memory regions to the port. */
+	lpfc_resume_rpi(mboxq, ndlp);
+	if (cmpl) {
+		mboxq->mbox_cmpl = cmpl;
+		mboxq->context1 = arg;
+		mboxq->context2 = ndlp;
+	} else
+		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mboxq->vport = ndlp->vport;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2010 Resume RPI Mailbox failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @vport: Pointer to the vport for which the vpi is being initialized
+ *
+ * This routine is invoked to activate a vpi with the port.
+ *
+ * Returns:
+ *    0 success
+ *    -ENOMEM or -EIO otherwise
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc = 0;
+	int retval = MBX_SUCCESS;
+	uint32_t mbox_tmo;
+	struct lpfc_hba *phba = vport->phba;
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	lpfc_init_vpi(phba, mboxq, vport->vpi);
+	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+				"2022 INIT VPI Mailbox failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		retval = -EIO;
+	}
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+
+	return retval;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This routine is the completion handler for the ADD_FCF_RECORD mailbox
+ * command.  It checks the status returned in the mailbox subheader and
+ * frees the nonembedded mailbox resources.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	void *virt_addr;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+
+	virt_addr = mboxq->sge_array->addr[0];
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+	if ((shdr_status || shdr_add_status) &&
+		(shdr_status != STATUS_FCF_IN_USE))
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2558 ADD_FCF_RECORD mailbox failed with "
+			"status x%x add_status x%x\n",
+			shdr_status, shdr_add_status);
+
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record.  This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	uint8_t *bytep;
+	void *virt_addr;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	uint32_t fcfindex;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
+		return -ENOMEM;
+	}
+
+	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+		  sizeof(uint32_t);
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+				     req_len, LPFC_SLI4_MBX_NEMBED);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2523 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Get the first SGE entry from the non-embedded DMA memory.  This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	virt_addr = mboxq->sge_array->addr[0];
+	/*
+	 * Configure the FCF record for FCFI 0.  This is the driver's
+	 * hardcoded default and gets used in nonFIP mode.
+	 */
+	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+	/*
+	 * Copy the fcf_index and the FCF Record Data. The data starts after
+	 * the FCoE header plus word10. The data copy needs to be endian
+	 * correct.
+	 */
+	bytep += sizeof(uint32_t);
+	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2515 ADD_FCF_RECORD mailbox failed with "
+			"status 0x%x\n", rc);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		rc = -EIO;
+	} else
+		rc = 0;
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record.  The
+ * values used are hardcoded.  This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+				struct fcf_record *fcf_record,
+				uint16_t fcf_index)
+{
+	memset(fcf_record, 0, sizeof(struct fcf_record));
+	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
+	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+	/* Set the VLAN bit map */
+	if (phba->valid_vlan) {
+		fcf_record->vlan_bitmap[phba->vlan_id / 8]
+			= 1 << (phba->vlan_id % 8);
+	}
+}
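+
+/*
+ * Worked example for the vlan bitmap above: vlan_id 21 sets
+ * vlan_bitmap[21 / 8] = vlan_bitmap[2] to 1 << (21 % 8) = 0x20.
+ */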
+
+/**
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * record and processing it one at a time starting from the @fcf_index
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2000 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_scan;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_scan;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag |= FCF_TS_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else {
+		/* Reset eligible FCF count for new scan */
+		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+			phba->fcf.eligible_fcf_cnt = 0;
+		error = 0;
+	}
+fail_fcf_scan:
+	if (error) {
+		if (mboxq)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		/* FCF scan failed, clear FCF_TS_INPROG flag */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI roundrobin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2763 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for FLOGI roundrobin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2758 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
+ * @phba: pointer to the lpfc_hba struct for this port.
+ *
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indices are put into the
+ * rr_bmask based on their priority level, starting from the highest
+ * priority down to the lowest; the most likely FCF candidate will be in
+ * the highest priority group. When this routine is called it searches the
+ * fcf_pri list for the next lowest priority group and repopulates the
+ * rr_bmask with only those fcf_indexes.
+ * Returns:
+ * 1=success 0=failure
+ **/
+static int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_pri;
+	uint16_t last_index;
+	struct lpfc_fcf_pri *fcf_pri;
+	int rc;
+	int ret = 0;
+
+	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+			LPFC_SLI4_FCF_TBL_INDX_MAX);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"3060 Last IDX %d\n", last_index);
+
+	/* Verify the priority list has 2 or more entries */
+	spin_lock_irq(&phba->hbalock);
+	if (list_empty(&phba->fcf.fcf_pri_list) ||
+	    list_is_singular(&phba->fcf.fcf_pri_list)) {
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+			"3061 Last IDX %d\n", last_index);
+		return 0; /* Empty rr list */
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	next_fcf_pri = 0;
+	/*
+	 * Clear the rr_bmask and set all of the bits that are at this
+	 * priority.
+	 */
+	memset(phba->fcf.fcf_rr_bmask, 0,
+			sizeof(*phba->fcf.fcf_rr_bmask));
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+			continue;
+		/*
+		 * The first priority that has not had a FLOGI failure
+		 * will be the highest.
+		 */
+		if (!next_fcf_pri)
+			next_fcf_pri = fcf_pri->fcf_rec.priority;
+		spin_unlock_irq(&phba->hbalock);
+		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+			rc = lpfc_sli4_fcf_rr_index_set(phba,
+						fcf_pri->fcf_rec.fcf_index);
+			if (rc)
+				return 0;
+		}
+		spin_lock_irq(&phba->hbalock);
+	}
+	/*
+	 * If next_fcf_pri was not set above and the list is not empty, then
+	 * FLOGI has failed on all of the entries.  Reset the FLOGI-failed
+	 * flags and start over at the beginning.
+	 */
+	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+			/*
+			 * The first priority that has not had a FLOGI failure
+			 * will be the highest.
+			 */
+			if (!next_fcf_pri)
+				next_fcf_pri = fcf_pri->fcf_rec.priority;
+			spin_unlock_irq(&phba->hbalock);
+			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+				rc = lpfc_sli4_fcf_rr_index_set(phba,
+						fcf_pri->fcf_rec.fcf_index);
+				if (rc)
+					return 0;
+			}
+			spin_lock_irq(&phba->hbalock);
+		}
+	} else
+		ret = 1;
+	spin_unlock_irq(&phba->hbalock);
+
+	return ret;
+}
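+
+/*
+ * Locking note for the routine above: hbalock is dropped around each
+ * lpfc_sli4_fcf_rr_index_set() call and the list walk resumes on the same
+ * element without re-validation, so fcf_pri entries must not be freed
+ * concurrently while this routine runs.
+ */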
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals to the
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_index;
+
+initial_priority:
+	/* Search start from next bit of currently registered FCF index */
+	next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+next_priority:
+	/* Determine the next fcf index to check */
+	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
+	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+				       LPFC_SLI4_FCF_TBL_INDX_MAX,
+				       next_fcf_index);
+
+	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		/*
+		 * We have wrapped: restart the search from bit 0.  Bits for
+		 * tried-and-failed entries are cleared elsewhere (see
+		 * lpfc_sli4_fcf_rr_index_clear), which is how an exhausted
+		 * priority level is eventually detected.
+		 */
+		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+	}
+
+	/* Check roundrobin failover list empty condition */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+		/*
+		 * If the next fcf index is not found, check whether there are
+		 * lower priority level fcf's in the fcf_priority list.
+		 * Set up the rr_bmask with all of the available fcf bits
+		 * at that level and continue the selection process.
+		 */
+		if (lpfc_check_next_fcf_pri_level(phba))
+			goto initial_priority;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2844 No roundrobin failover FCF available\n");
+
+		return LPFC_FCOE_FCF_NEXT_NONE;
+	}
+
+	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+		LPFC_FCF_FLOGI_FAILED) {
+		if (list_is_singular(&phba->fcf.fcf_pri_list))
+			return LPFC_FCOE_FCF_NEXT_NONE;
+
+		goto next_priority;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2845 Get next roundrobin failover FCF (x%x)\n",
+			next_fcf_index);
+
+	return next_fcf_index;
+}
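+
+/*
+ * Worked example for the search above: with current_rec.fcf_indx = 5 the
+ * scan starts at bit 6; if no bit is set in [6, LPFC_SLI4_FCF_TBL_INDX_MAX)
+ * it wraps and rescans from bit 0.  Landing back on index 5, or finding no
+ * set bit at all, falls through to the priority-level check before
+ * LPFC_FCOE_FCF_NEXT_NONE is returned.
+ */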
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index of the fcf record to mark eligible.
+ *
+ * This routine sets the FCF record index in to the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit is successfully set; otherwise it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2610 FCF (x%x) reached driver's book "
+				"keeping dimension:x%x\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return -EINVAL;
+	}
+	/* Set the eligible FCF record index bmask */
+	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2790 Set FCF (x%x) to roundrobin FCF failover "
+			"bmask\n", fcf_index);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index of the fcf record to clear.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2762 FCF (x%x) reached driver's book "
+				"keeping dimension:x%x\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return;
+	}
+	/* Clear the eligible FCF record index bmask */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
+				 list) {
+		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+			list_del_init(&fcf_pri->list);
+			break;
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2791 Clear FCF (x%x) from roundrobin failover "
+			"bmask\n", fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+static void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+	uint32_t shdr_status, shdr_add_status;
+
+	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &redisc_fcf->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+			     &redisc_fcf->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2746 Requesting for FCF rediscovery failed "
+				"status x%x add_status x%x\n",
+				shdr_status, shdr_add_status);
+		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * CVL event triggered FCF rediscover request failed,
+			 * last resort to re-try current registered FCF entry.
+			 */
+			lpfc_retry_pport_discovery(phba);
+		} else {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * DEAD FCF event triggered FCF rediscover request
+			 * failed, last resort to fail over as a link down
+			 * to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		}
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2775 Start FCF rediscover quiescent timer\n");
+		/*
+		 * Start the FCF rediscovery wait timer for the pending FCF
+		 * before rescanning the FCF record table.
+		 */
+		lpfc_fcf_redisc_wait_start_timer(phba);
+	}
+
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+	int rc, length;
+
+	/* Cancel retry delay timers to all vports before FCF rediscover */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2745 Failed to allocate mbox for "
+				"requesting FCF rediscover.\n");
+		return -ENOMEM;
+	}
+
+	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+	/* Set count to 0 for invalidating the entire FCF database */
+	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+	/* Issue the mailbox command asynchronously */
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when driver failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+	uint32_t link_state;
+
+	/*
+	 * Last resort as FCF DEAD event failover will treat this as
+	 * a link down, but save the link state because we don't want
+	 * it to be changed to Link Down unless it is already down.
+	 */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	/* Unregister FCF if no devices connected to it */
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets SLI3 port configure region 23 data through the
+ * memory dump mailbox command.  When it successfully retrieves data, the
+ * size of the data is returned; otherwise, 0 is returned.
+ **/
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+	LPFC_MBOXQ_t *pmb = NULL;
+	MAILBOX_t *mb;
+	uint32_t offset = 0;
+	int rc;
+
+	if (!rgn23_data)
+		return 0;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2600 failed to allocate mailbox memory\n");
+		return 0;
+	}
+	mb = &pmb->u.mb;
+
+	do {
+		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2601 failed to read config "
+					"region 23, rc 0x%x Status 0x%x\n",
+					rc, mb->mbxStatus);
+			mb->un.varDmp.word_cnt = 0;
+		}
+		/*
+		 * Dump mem may return a zero word count when finished, or we
+		 * may have hit a mailbox error; either way we are done.
+		 */
+		if (mb->un.varDmp.word_cnt == 0)
+			break;
+		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
+			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
+
+		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				       rgn23_data + offset,
+				       mb->un.varDmp.word_cnt);
+		offset += mb->un.varDmp.word_cnt;
+	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return offset;
+}
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets SLI4 port configure region 23 data through the
+ * memory dump mailbox command.  When it successfully retrieves data, the
+ * size of the data is returned; otherwise, 0 is returned.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+	LPFC_MBOXQ_t *mboxq = NULL;
+	struct lpfc_dmabuf *mp = NULL;
+	struct lpfc_mqe *mqe;
+	uint32_t data_length = 0;
+	int rc;
+
+	if (!rgn23_data)
+		return 0;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3105 failed to allocate mailbox memory\n");
+		return 0;
+	}
+
+	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+		goto out;
+	mqe = &mboxq->u.mqe;
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc)
+		goto out;
+	data_length = mqe->un.mb_words[5];
+	if (data_length == 0)
+		goto out;
+	if (data_length > DMP_RGN23_SIZE) {
+		data_length = 0;
+		goto out;
+	}
+	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (mp) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+	return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLV for port status to
+ * decide if the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+	uint8_t *rgn23_data = NULL;
+	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+	uint32_t offset = 0;
+
+	/* Get adapter Region 23 data */
+	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+	if (!rgn23_data)
+		goto out;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+	else {
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+			goto out;
+		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+	}
+
+	if (!data_size)
+		goto out;
+
+	/* Check the region signature first */
+	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2619 Config region 23 has bad signature\n");
+		goto out;
+	}
+	offset += 4;
+
+	/* Check the data structure version */
+	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2620 Config region 23 has bad version\n");
+		goto out;
+	}
+	offset += 4;
+
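+	/*
+	 * Record layout assumed by the parser below: type in byte 0, length
+	 * in words in byte 1, data from byte 4, so each record occupies
+	 * rgn23_data[offset + 1] * 4 + 4 bytes.
+	 */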
+	/* Parse TLV entries in the region */
+	while (offset < data_size) {
+		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+			break;
+		/*
+		 * If the TLV is not driver specific TLV or driver id is
+		 * not linux driver id, skip the record.
+		 */
+		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
+		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
+		    (rgn23_data[offset + 3] != 0)) {
+			offset += rgn23_data[offset + 1] * 4 + 4;
+			continue;
+		}
+
+		/* Driver found a driver specific TLV in the config region */
+		sub_tlv_len = rgn23_data[offset + 1] * 4;
+		offset += 4;
+		tlv_offset = 0;
+
+		/*
+		 * Search for configured port state sub-TLV.
+		 */
+		while ((offset < data_size) &&
+			(tlv_offset < sub_tlv_len)) {
+			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
+				offset += 4;
+				tlv_offset += 4;
+				break;
+			}
+			if (rgn23_data[offset] != PORT_STE_TYPE) {
+				/* advance both offsets by this record's len */
+				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
+				offset += rgn23_data[offset + 1] * 4 + 4;
+				continue;
+			}
+
+			/* This HBA contains PORT_STE configured */
+			if (!rgn23_data[offset + 2])
+				phba->hba_flag |= LINK_DISABLED;
+
+			goto out;
+		}
+	}
+
+out:
+	kfree(rgn23_data);
+	return;
+}
+
+/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the dma buffers described
+ * in @dmabuf_list to create a list of BDEs; the object itself is written
+ * to the port's "/" object name. This routine will fill in as many BDEs
+ * as the embedded mailbox can support. The @offset variable will be used
+ * to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used
+ * to determine the end of the object and whether the eof bit should be
+ * set.
+ *
+ * Returns 0 if successful; @offset will contain the new offset to use
+ * for the next write.
+ * Returns a negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+	       uint32_t size, uint32_t *offset)
+{
+	struct lpfc_mbx_wr_object *wr_object;
+	LPFC_MBOXQ_t *mbox;
+	int rc = 0, i = 0;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t mbox_tmo;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t written = 0;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			LPFC_MBOX_OPCODE_WRITE_OBJECT,
+			sizeof(struct lpfc_mbx_wr_object) -
+			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+	wr_object->u.request.write_offset = *offset;
+	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+	wr_object->u.request.object_name[0] =
+		cpu_to_le32(wr_object->u.request.object_name[0]);
+	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+	list_for_each_entry(dmabuf, dmabuf_list, list) {
+		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+			break;
+		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+		wr_object->u.request.bde[i].addrHigh =
+			putPaddrHigh(dmabuf->phys);
+		if (written + SLI4_PAGE_SIZE >= size) {
+			wr_object->u.request.bde[i].tus.f.bdeSize =
+				(size - written);
+			written += (size - written);
+			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+		} else {
+			wr_object->u.request.bde[i].tus.f.bdeSize =
+				SLI4_PAGE_SIZE;
+			written += SLI4_PAGE_SIZE;
+		}
+		i++;
+	}
+	wr_object->u.request.bde_count = i;
+	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3025 Write Object mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	} else
+		*offset += wr_object->u.response.actual_write_length;
+	return rc;
+}
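+
+/*
+ * Sizing note for the BDE loop above (worked example): writing size = 9000
+ * bytes with SLI4_PAGE_SIZE = 4096 produces BDEs of 4096, 4096 and 808
+ * bytes, with the eof bit set on the final, partial BDE.
+ */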
+
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterates through the mailboxq and cleans up all REG_LOGIN
+ * and REG_VPI mailbox commands associated with the vport. It is called
+ * when the driver wants to restart discovery of the vport due to
+ * a Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mb, *nextmb;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_nodelist *act_mbx_ndlp = NULL;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	LIST_HEAD(mbox_cmd_list);
+	uint8_t restart_loop;
+
+	/* Clean up internally queued mailbox commands with the vport */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if (mb->vport != vport)
+			continue;
+
+		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+			(mb->u.mb.mbxCommand != MBX_REG_VPI))
+			continue;
+
+		list_del(&mb->list);
+		list_add_tail(&mb->list, &mbox_cmd_list);
+	}
+	/* Clean up active mailbox command with the vport */
+	mb = phba->sli.mbox_active;
+	if (mb && (mb->vport == vport)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+			(mb->u.mb.mbxCommand == MBX_REG_VPI))
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
+			/* Put reference count for delayed processing */
+			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+			/* Unregister the RPI when mailbox complete */
+			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+		}
+	}
+	/* Cleanup any mailbox completions which are not yet processed */
+	do {
+		restart_loop = 0;
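+		/*
+		 * hbalock must be dropped below to take shost->host_lock,
+		 * which invalidates the list cursor; restart the scan after
+		 * every entry that is modified.
+		 */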
+		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+			/*
+			 * If this mailbox is already processed or it is
+			 * for another vport, ignore it.
+			 */
+			if ((mb->vport != vport) ||
+				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
+				continue;
+
+			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+				(mb->u.mb.mbxCommand != MBX_REG_VPI))
+				continue;
+
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+				ndlp = (struct lpfc_nodelist *)mb->context2;
+				/* Unregister the RPI when mailbox complete */
+				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				restart_loop = 1;
+				spin_unlock_irq(&phba->hbalock);
+				spin_lock(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+				spin_unlock(shost->host_lock);
+				spin_lock_irq(&phba->hbalock);
+				break;
+			}
+		}
+	} while (restart_loop);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Release the cleaned-up mailbox commands */
+	while (!list_empty(&mbox_cmd_list)) {
+		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			mb->context2 = NULL;
+			if (ndlp) {
+				spin_lock(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+				spin_unlock(shost->host_lock);
+				lpfc_nlp_put(ndlp);
+			}
+		}
+		mempool_free(mb, phba->mbox_mem_pool);
+	}
+
+	/* Release the ndlp with the cleaned-up active mailbox command */
+	if (act_mbx_ndlp) {
+		spin_lock(shost->host_lock);
+		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+		spin_unlock(shost->host_lock);
+		lpfc_nlp_put(act_mbx_ndlp);
+	}
+}
+
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempts to submit IOCBs on the txq
+ * to the adapter.  For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because there
+ * are no SGLs.  This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *piocbq = NULL;
+	unsigned long iflags = 0;
+	char *fail_msg = NULL;
+	struct lpfc_sglq *sglq;
+	union lpfc_wqe128 wqe128;
+	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
+	uint32_t txq_cnt = 0;
+
+	pring = lpfc_phba_elsring(phba);
+	if (unlikely(!pring))
+		return 0;
+
+	spin_lock_irqsave(&pring->ring_lock, iflags);
+	list_for_each_entry(piocbq, &pring->txq, list) {
+		txq_cnt++;
+	}
+
+	if (txq_cnt > pring->txq_max)
+		pring->txq_max = txq_cnt;
+
+	spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+	while (!list_empty(&pring->txq)) {
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+
+		piocbq = lpfc_sli_ringtx_get(phba, pring);
+		if (!piocbq) {
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2823 txq empty and txq_cnt is %d\n ",
+				txq_cnt);
+			break;
+		}
+		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
+		if (!sglq) {
+			__lpfc_sli_ringtx_put(phba, pring, piocbq);
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			break;
+		}
+		txq_cnt--;
+
+		/* The xri and iocb resources secured,
+		 * attempt to issue request
+		 */
+		piocbq->sli4_lxritag = sglq->sli4_lxritag;
+		piocbq->sli4_xritag = sglq->sli4_xritag;
+		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+			fail_msg = "to convert bpl to sgl";
+		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
+			fail_msg = "to convert iocb to wqe";
+		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
+			fail_msg = " - Wq is full";
+		else
+			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+		if (fail_msg) {
+			/* Failed means we can't issue and need to cancel */
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2822 IOCB failed %s iotag 0x%x "
+					"xri 0x%x\n",
+					fail_msg,
+					piocbq->iotag, piocbq->sli4_xritag);
+			list_add_tail(&piocbq->list, &completions);
+		}
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+	}
+
+	/* Cancel all the IOCBs that cannot be issued */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+				IOERR_SLI_ABORTED);
+
+	return txq_cnt;
+}
+
+/**
+ * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @pwqeq: Pointer to the command iocbq that carries the WQE.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the WQE
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the WQE contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the WQE contains a single
+ * BDE then it is converted to a single sli4_sge.
+ * The WQE is still in cpu endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+ */
+static uint16_t
+lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
+		 struct lpfc_sglq *sglq)
+{
+	uint16_t xritag = NO_XRI;
+	struct ulp_bde64 *bpl = NULL;
+	struct ulp_bde64 bde;
+	struct sli4_sge *sgl  = NULL;
+	struct lpfc_dmabuf *dmabuf;
+	union lpfc_wqe *wqe;
+	int numBdes = 0;
+	int i = 0;
+	uint32_t offset = 0; /* accumulated offset in the sg request list */
+	int inbound = 0; /* number of sg reply entries inbound from firmware */
+	uint32_t cmd;
+
+	if (!pwqeq || !sglq)
+		return xritag;
+
+	sgl  = (struct sli4_sge *)sglq->sgl;
+	wqe = &pwqeq->wqe;
+	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
+
+	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
+	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
+		return sglq->sli4_xritag;
+	numBdes = pwqeq->rsvd2;
+	if (numBdes) {
+		/* The addrHigh and addrLow fields within the WQE
+		 * have not been byteswapped yet so there is no
+		 * need to swap them back.
+		 */
+		if (pwqeq->context3)
+			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
+		else
+			return xritag;
+
+		bpl  = (struct ulp_bde64 *)dmabuf->virt;
+		if (!bpl)
+			return xritag;
+
+		for (i = 0; i < numBdes; i++) {
+			/* Should already be byte swapped. */
+			sgl->addr_hi = bpl->addrHigh;
+			sgl->addr_lo = bpl->addrLow;
+
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			if ((i+1) == numBdes)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w = le32_to_cpu(bpl->tus.w);
+			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+			/* The offsets in the sgl need to be accumulated
+			 * separately for the request and reply lists.
+			 * The request is always first, the reply follows.
+			 */
+			switch (cmd) {
+			case CMD_GEN_REQUEST64_WQE:
+				/* add up the reply sg entries */
+				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+					inbound++;
+				/* first inbound? reset the offset */
+				if (inbound == 1)
+					offset = 0;
+				bf_set(lpfc_sli4_sge_offset, sgl, offset);
+				bf_set(lpfc_sli4_sge_type, sgl,
+					LPFC_SGE_TYPE_DATA);
+				offset += bde.tus.f.bdeSize;
+				break;
+			case CMD_FCP_TRSP64_WQE:
+				bf_set(lpfc_sli4_sge_offset, sgl, 0);
+				bf_set(lpfc_sli4_sge_type, sgl,
+					LPFC_SGE_TYPE_DATA);
+				break;
+			case CMD_FCP_TSEND64_WQE:
+			case CMD_FCP_TRECEIVE64_WQE:
+				bf_set(lpfc_sli4_sge_type, sgl,
+					bpl->tus.f.bdeFlags);
+				if (i < 3)
+					offset = 0;
+				else
+					offset += bde.tus.f.bdeSize;
+				bf_set(lpfc_sli4_sge_offset, sgl, offset);
+				break;
+			}
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			bpl++;
+			sgl++;
+		}
+	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
+		/* The addrHigh and addrLow fields of the BDE have not
+		 * been byteswapped yet so they need to be swapped
+		 * before putting them in the sgl.
+		 */
+		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
+		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
+	}
+	return sglq->sli4_xritag;
+}
+
+/**
+ * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
+ * @phba: Pointer to HBA context object.
+ * @ring_number: Base sli ring number
+ * @pwqe: Pointer to command WQE.
+ **/
+int
+lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *pwqe)
+{
+	union lpfc_wqe *wqe = &pwqe->wqe;
+	struct lpfc_nvmet_rcv_ctx *ctxp;
+	struct lpfc_queue *wq;
+	struct lpfc_sglq *sglq;
+	struct lpfc_sli_ring *pring;
+	unsigned long iflags;
+	uint32_t ret = 0;
+
+	/* NVME_LS and NVME_LS ABTS requests. */
+	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+		pring =  phba->sli4_hba.nvmels_wq->pring;
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
+		if (!sglq) {
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			return WQE_BUSY;
+		}
+		pwqe->sli4_lxritag = sglq->sli4_lxritag;
+		pwqe->sli4_xritag = sglq->sli4_xritag;
+		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			return WQE_ERROR;
+		}
+		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
+		       pwqe->sli4_xritag);
+		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
+		if (ret) {
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			return ret;
+		}
+
+		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		return 0;
+	}
+
+	/* NVME_FCREQ and NVME_ABTS requests */
+	if (pwqe->iocb_flag & LPFC_IO_NVME) {
+		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
+		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+		bf_set(wqe_cqid, &wqe->generic.wqe_com,
+		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+		ret = lpfc_sli4_wq_put(wq, wqe);
+		if (ret) {
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			return ret;
+		}
+		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		return 0;
+	}
+
+	/* NVMET requests */
+	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
+		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		ctxp = pwqe->context2;
+		sglq = ctxp->ctxbuf->sglq;
+		if (pwqe->sli4_xritag ==  NO_XRI) {
+			pwqe->sli4_lxritag = sglq->sli4_lxritag;
+			pwqe->sli4_xritag = sglq->sli4_xritag;
+		}
+		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
+		       pwqe->sli4_xritag);
+		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+		bf_set(wqe_cqid, &wqe->generic.wqe_com,
+		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+		ret = lpfc_sli4_wq_put(wq, wqe);
+		if (ret) {
+			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+			return ret;
+		}
+		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		return 0;
+	}
+	return WQE_ERROR;
+}
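+
+/*
+ * Dispatch summary for lpfc_sli4_issue_wqe() above: NVME LS traffic goes
+ * to nvmels_wq, NVME FCP and NVMET traffic go to the nvme_wq selected by
+ * hba_wqidx, and any other iocb_flag combination returns WQE_ERROR.
+ */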
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli.h
new file mode 100644
index 0000000..a3b1b51
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli.h
@@ -0,0 +1,353 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* forward declaration for LPFC_IOCB_t's use */
+struct lpfc_hba;
+struct lpfc_vport;
+
+/* Define the context types that SLI handles for abort and sum operations. */
+typedef enum _lpfc_ctx_cmd {
+	LPFC_CTX_LUN,
+	LPFC_CTX_TGT,
+	LPFC_CTX_HOST
+} lpfc_ctx_cmd;
+
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fip		acqe_fip;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_acqe_grp5		acqe_grp5;
+		struct lpfc_acqe_fc_la		acqe_fc;
+		struct lpfc_acqe_sli		acqe_sli;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+		struct lpfc_wcqe_complete	wcqe_cmpl;
+	} cqe;
+};
+
+/* This structure is used to handle IOCB requests / responses */
+struct lpfc_iocbq {
+	/* lpfc_iocbqs are used in double linked lists */
+	struct list_head list;
+	struct list_head clist;
+	struct list_head dlist;
+	uint16_t iotag;         /* pre-assigned IO tag */
+	uint16_t sli4_lxritag;  /* logical pre-assigned XRI. */
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	uint16_t hba_wqidx;     /* index to HBA work queue */
+	struct lpfc_cq_event cq_event;
+	struct lpfc_wcqe_complete wcqe_cmpl;	/* WQE cmpl */
+	uint64_t isr_timestamp;
+
+	/* Be careful here */
+	union lpfc_wqe wqe;	/* WQE cmd */
+	IOCB_t iocb;		/* For IOCB cmd or if we want 128 byte WQE */
+
+	uint8_t rsvd2;
+	uint8_t priority;	/* OAS priority */
+	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
+	uint32_t iocb_flag;
+#define LPFC_IO_LIBDFC		1	/* libdfc iocb */
+#define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
+#define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
+#define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
+#define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
+#define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
+#define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
+#define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
+#define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP		0x100	/* security IO */
+#define LPFC_IO_ON_TXCMPLQ	0x200	/* The IO is still on the TXCMPLQ */
+#define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
+#define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
+#define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING	0x2000 /* timeout handler abort window */
+
+#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
+#define LPFC_FIP_ELS_ID_SHIFT	14
+
+#define LPFC_IO_OAS		0x10000 /* OAS FCP IO */
+#define LPFC_IO_FOF		0x20000 /* FOF FCP IO */
+#define LPFC_IO_LOOPBACK	0x40000 /* Loopback IO */
+#define LPFC_PRLI_NVME_REQ	0x80000 /* This is an NVME PRLI. */
+#define LPFC_PRLI_FCP_REQ	0x100000 /* This is an FCP PRLI. */
+#define LPFC_IO_NVME	        0x200000 /* NVME FCP command */
+#define LPFC_IO_NVME_LS		0x400000 /* NVME LS command */
+#define LPFC_IO_NVMET		0x800000 /* NVMET command */
+
+	uint32_t drvrTimeout;	/* driver timeout in seconds */
+	struct lpfc_vport *vport;/* virtual port pointer */
+	void *context1;		/* caller context information */
+	void *context2;		/* caller context information */
+	void *context3;		/* caller context information */
+	union {
+		wait_queue_head_t    *wait_queue;
+		struct lpfc_iocbq    *rsp_iocb;
+		struct lpfcMboxq     *mbox;
+		struct lpfc_nodelist *ndlp;
+		struct lpfc_node_rrq *rrq;
+	} context_un;
+
+	void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_iocbq *);
+	void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_iocbq *);
+	void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_iocbq *);
+	void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+			  struct lpfc_wcqe_complete *);
+};
+
+#define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */
+
+#define IOCB_SUCCESS        0
+#define IOCB_BUSY           1
+#define IOCB_ERROR          2
+#define IOCB_TIMEDOUT       3
+
+#define SLI_WQE_RET_WQE    1    /* Return WQE if cmd ring full */
+
+#define WQE_SUCCESS        0
+#define WQE_BUSY           1
+#define WQE_ERROR          2
+#define WQE_TIMEDOUT       3
+#define WQE_ABORTED        4
+
+#define LPFC_MBX_WAKE		1
+#define LPFC_MBX_IMED_UNREG	2
+
+typedef struct lpfcMboxq {
+	/* MBOXQs are used in single linked lists */
+	struct list_head list;	/* ptr to next mailbox command */
+	union {
+		MAILBOX_t mb;		/* Mailbox cmd */
+		struct lpfc_mqe mqe;
+	} u;
+	struct lpfc_vport *vport;/* virtual port pointer */
+	void *context1;		/* caller context information */
+	void *context2;		/* caller context information */
+
+	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
+	uint8_t mbox_flag;
+	uint16_t in_ext_byte_len;
+	uint16_t out_ext_byte_len;
+	uint8_t  mbox_offset_word;
+	struct lpfc_mcqe mcqe;
+	struct lpfc_mbx_nembed_sge_virt *sge_array;
+} LPFC_MBOXQ_t;
+
+#define MBX_POLL        1	/* poll mailbox till command done, then
+				   return */
+#define MBX_NOWAIT      2	/* issue command then return immediately */
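+
+/*
+ * Illustrative mailbox usage, mirroring the pattern used throughout
+ * lpfc_sli.c (a sketch; cmpl_fn stands for any completion handler):
+ *
+ *	LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *	if (!mboxq)
+ *		return -ENOMEM;
+ *	... build the command in mboxq->u ...
+ *	mboxq->mbox_cmpl = cmpl_fn;
+ *	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
+ *		mempool_free(mboxq, phba->mbox_mem_pool);
+ */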
+
+#define LPFC_MAX_RING_MASK  5	/* max num of rctl/type masks allowed per
+				   ring */
+#define LPFC_SLI3_MAX_RING  4	/* Max num of SLI3 rings used by driver.
+				   For SLI4, an additional ring for each
+				   FCP WQ will be allocated.  */
+
+struct lpfc_sli_ring;
+
+struct lpfc_sli_ring_mask {
+	uint8_t profile;	/* profile associated with ring */
+	uint8_t rctl;	/* rctl / type pair configured for ring */
+	uint8_t type;	/* rctl / type pair configured for ring */
+	uint8_t rsvd;
+	/* rcv'd unsol event */
+	void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
+					 struct lpfc_sli_ring *,
+					 struct lpfc_iocbq *);
+};
+
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_ring_stat {
+	uint64_t iocb_event;	 /* IOCB event counters */
+	uint64_t iocb_cmd;	 /* IOCB cmd issued */
+	uint64_t iocb_rsp;	 /* IOCB rsp received */
+	uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
+	uint64_t iocb_cmd_full;	 /* IOCB cmd ring full */
+	uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
+	uint64_t iocb_rsp_full;	 /* IOCB rsp ring full */
+};
+
+struct lpfc_sli3_ring {
+	uint32_t local_getidx;  /* last available cmd index (from cmdGetInx) */
+	uint32_t next_cmdidx;   /* next_cmd index */
+	uint32_t rspidx;	/* current index in response ring */
+	uint32_t cmdidx;	/* current index in command ring */
+	uint16_t numCiocb;	/* number of command iocb's per ring */
+	uint16_t numRiocb;	/* number of rsp iocb's per ring */
+	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
+	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
+	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
+	uint32_t *rspringaddr;	/* virtual address for rsp rings */
+};
+
+struct lpfc_sli4_ring {
+	struct lpfc_queue *wqp;	/* Pointer to associated WQ */
+};
+
+
+/* Structure used to hold SLI ring information */
+struct lpfc_sli_ring {
+	uint16_t flag;		/* ring flags */
+#define LPFC_DEFERRED_RING_EVENT 0x001	/* Deferred processing a ring event */
+#define LPFC_CALL_RING_AVAILABLE 0x002	/* indicates cmd was full */
+#define LPFC_STOP_IOCB_EVENT     0x020	/* Stop processing IOCB cmds event */
+	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */
+
+	uint8_t rsvd;
+	uint8_t ringno;		/* ring number */
+
+	spinlock_t ring_lock;	/* lock for issuing commands */
+
+	uint32_t fast_iotag;	/* max fastlookup based iotag           */
+	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
+	uint32_t iotag_max;	/* max iotag value to use               */
+	struct list_head txq;
+	uint16_t txq_cnt;	/* current length of queue */
+	uint16_t txq_max;	/* max length */
+	struct list_head txcmplq;
+	uint16_t txcmplq_cnt;	/* current length of queue */
+	uint16_t txcmplq_max;	/* max length */
+	uint32_t missbufcnt;	/* keep track of buffers to post */
+	struct list_head postbufq;
+	uint16_t postbufq_cnt;	/* current length of queue */
+	uint16_t postbufq_max;	/* max length */
+	struct list_head iocb_continueq;
+	uint16_t iocb_continueq_cnt;	/* current length of queue */
+	uint16_t iocb_continueq_max;	/* max length */
+	struct list_head iocb_continue_saveq;
+
+	struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
+	uint32_t num_mask;	/* number of mask entries in prt array */
+	void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
+		struct lpfc_sli_ring *, struct lpfc_iocbq *);
+
+	struct lpfc_sli_ring_stat stats;	/* SLI statistical info */
+
+	/* cmd ring available */
+	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
+					struct lpfc_sli_ring *);
+	union {
+		struct lpfc_sli3_ring sli3;
+		struct lpfc_sli4_ring sli4;
+	} sli;
+};
+
+/* Structure used for configuring rings to a specific profile or rctl / type */
+struct lpfc_hbq_init {
+	uint32_t rn;		/* Receive buffer notification */
+	uint32_t entry_count;	/* max # of entries in HBQ */
+	uint32_t headerLen;	/* 0 if not profile 4 or 5 */
+	uint32_t logEntry;	/* Set to 1 if this HBQ used for LogEntry */
+	uint32_t profile;	/* Selection profile 0=all, 7=logentry */
+	uint32_t ring_mask;	/* Binds HBQ to a ring e.g. Ring0=b0001,
+				 * ring2=b0100 */
+	uint32_t hbq_index;	/* index of this hbq in ring.HBQs[] */
+
+	uint32_t seqlenoff;
+	uint32_t maxlen;
+	uint32_t seqlenbcnt;
+	uint32_t cmdcodeoff;
+	uint32_t cmdmatch[8];
+	uint32_t mask_count;	/* number of mask entries in prt array */
+	struct hbq_mask hbqMasks[6];
+
+	/* Non-config rings fields to keep track of buffer allocations */
+	uint32_t buffer_count;	/* number of buffers allocated */
+	uint32_t init_count;	/* number to allocate when initialized */
+	uint32_t add_count;	/* number to allocate when starved */
+};
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_stat {
+	uint64_t mbox_stat_err;  /* Mbox cmds completed status error */
+	uint64_t mbox_cmd;       /* Mailbox commands issued */
+	uint64_t sli_intr;       /* Count of Host Attention interrupts */
+	uint64_t sli_prev_intr;  /* Previous cnt of Host Attention interrupts */
+	uint64_t sli_ips;        /* Host Attention interrupts per sec */
+	uint32_t err_attn_event; /* Error Attn event counters */
+	uint32_t link_event;     /* Link event counters */
+	uint32_t mbox_event;     /* Mailbox event counters */
+	uint32_t mbox_busy;	 /* Mailbox cmd busy */
+};
+
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+	uint32_t link_failure_count;
+	uint32_t loss_of_sync_count;
+	uint32_t loss_of_signal_count;
+	uint32_t prim_seq_protocol_err_count;
+	uint32_t invalid_tx_word_count;
+	uint32_t invalid_crc_count;
+	uint32_t error_frames;
+	uint32_t link_events;
+};
+
+/* Structure used to hold SLI information */
+struct lpfc_sli {
+	uint32_t num_rings;
+	uint32_t sli_flag;
+
+	/* Additional sli_flags */
+#define LPFC_SLI_MBOX_ACTIVE      0x100	/* HBA mailbox is currently active */
+#define LPFC_SLI_ACTIVE           0x200	/* SLI in firmware is active */
+#define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
+#define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT          0x1000 /* need for menl fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
+#define LPFC_SLI_SUPPRESS_RSP     0x4000 /* Suppress RSP feature is supported */
+#define LPFC_SLI_USE_EQDR         0x8000 /* EQ Delay Register is supported */
+
+	struct lpfc_sli_ring *sli3_ring;
+
+	struct lpfc_sli_stat slistat;	/* SLI statistical info */
+	struct list_head mboxq;
+	uint16_t mboxq_cnt;	/* current length of queue */
+	uint16_t mboxq_max;	/* max length */
+	LPFC_MBOXQ_t *mbox_active;	/* active mboxq information */
+	struct list_head mboxq_cmpl;
+
+	struct timer_list mbox_tmo;	/* Hold clk to timeout active mbox
+					   cmd */
+
+#define LPFC_IOCBQ_LOOKUP_INCREMENT  1024
+	struct lpfc_iocbq **iocbq_lookup;  /* array to lookup IOCB by IOTAG */
+	size_t iocbq_lookup_len;           /* current length of the array */
+	uint16_t  last_iotag;              /* last allocated IOTAG */
+	unsigned long  stats_start;        /* in seconds */
+	struct lpfc_lnk_stat lnk_stat_offsets;
+};
+
+/* Timeout for normal outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO				30
+/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_TMO		60
+/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO	300
+/* Timeout for other flash-based outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO_FLASH_CMD			300
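All four timeouts above are expressed in seconds, so call sites convert them to jiffies before arming a timer. A minimal sketch of that conversion (the helper name mbox_tmo_jiffies is hypothetical; the driver computes the actual value through its own mailbox-timeout lookup logic):

#include <linux/jiffies.h>

/* Hypothetical helper: convert a mailbox timeout given in seconds,
 * such as LPFC_MBOX_TMO, into jiffies for use with mod_timer(). */
static inline unsigned long mbox_tmo_jiffies(unsigned int tmo_seconds)
{
	return msecs_to_jiffies(tmo_seconds * 1000);
}

/* e.g. mod_timer(&mbox_tmo, jiffies + mbox_tmo_jiffies(LPFC_MBOX_TMO)); */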
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli4.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 0000000..a132a83
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,852 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO		10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1   		10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
+#define LPFC_RPI_LOW_WATER_MARK			10
+
+#define LPFC_UNREG_FCF                          1
+#define LPFC_SKIP_UNREG_FCF                     0
+
+/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
+#define LPFC_FCF_REDISCOVER_WAIT_TMO		2000 /* msec */
+
+/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT		254
+
+/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
+#define LPFC_HBA_IO_CHAN_MIN	0
+#define LPFC_HBA_IO_CHAN_MAX	32
+#define LPFC_FCP_IO_CHAN_DEF	4
+#define LPFC_NVME_IO_CHAN_DEF	0
+
+/* Number of channels used for Flash Optimized Fabric (FOF) operations */
+
+#define LPFC_FOF_IO_CHAN_NUM       1
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there are no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX	0
+#define LPFC_FCOE_FCF_GET_FIRST	0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE	0xFFFF
+
+#define LPFC_FCOE_NULL_VID	0xFFF
+#define LPFC_FCOE_IGNORE_VID	0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3	0xFF
+#define LPFC_FCOE_FCF_MAC4	0xFF
+#define LPFC_FCOE_FCF_MAC5	0xFE
+#define LPFC_FCOE_FCF_MAP0	0x0E
+#define LPFC_FCOE_FCF_MAP1	0xFC
+#define LPFC_FCOE_FCF_MAP2	0x00
+#define LPFC_FCOE_MAX_RCV_SIZE	0x800
+#define LPFC_FCOE_FKA_ADV_PER	0
+#define LPFC_FCOE_FIP_PRIORITY	0x80
+
+#define sli4_sid_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_s_id[0] << 16 | \
+	 (fc_hdr)->fh_s_id[1] <<  8 | \
+	 (fc_hdr)->fh_s_id[2])
+
+#define sli4_did_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_d_id[0] << 16 | \
+	 (fc_hdr)->fh_d_id[1] <<  8 | \
+	 (fc_hdr)->fh_d_id[2])
+
+#define sli4_fctl_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_f_ctl[0] << 16 | \
+	 (fc_hdr)->fh_f_ctl[1] <<  8 | \
+	 (fc_hdr)->fh_f_ctl[2])
+
+#define sli4_type_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_type)
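These macros reassemble the 3-byte, big-endian address and control fields of an FC frame header into host integers. A hedged usage sketch, assuming fc_hdr points at a valid struct fc_frame_header (from scsi/fc/fc_fs.h) taken from a received frame:

static void decode_fc_hdr(struct fc_frame_header *fc_hdr)
{
	uint32_t sid  = sli4_sid_from_fc_hdr(fc_hdr);  /* 24-bit source FC_ID */
	uint32_t did  = sli4_did_from_fc_hdr(fc_hdr);  /* 24-bit destination FC_ID */
	uint32_t fctl = sli4_fctl_from_fc_hdr(fc_hdr); /* 24-bit F_CTL field */
	uint8_t  type = sli4_type_from_fc_hdr(fc_hdr); /* FC-4 type byte */

	(void)sid; (void)did; (void)fctl; (void)type;  /* placeholders */
}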
+
+#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+
+#define INT_FW_UPGRADE	0
+#define RUN_FW_UPGRADE	1
+
+enum lpfc_sli4_queue_type {
+	LPFC_EQ,
+	LPFC_GCQ,
+	LPFC_MCQ,
+	LPFC_WCQ,
+	LPFC_RCQ,
+	LPFC_MQ,
+	LPFC_WQ,
+	LPFC_HRQ,
+	LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+	LPFC_NONE,
+	LPFC_MBOX,
+	LPFC_FCP,
+	LPFC_ELS,
+	LPFC_NVME,
+	LPFC_NVMET,
+	LPFC_NVME_LS,
+	LPFC_USOL
+};
+
+union sli4_qe {
+	void *address;
+	struct lpfc_eqe *eqe;
+	struct lpfc_cqe *cqe;
+	struct lpfc_mcqe *mcqe;
+	struct lpfc_wcqe_complete *wcqe_complete;
+	struct lpfc_wcqe_release *wcqe_release;
+	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+	struct lpfc_rcqe_complete *rcqe_complete;
+	struct lpfc_mqe *mqe;
+	union  lpfc_wqe *wqe;
+	union  lpfc_wqe128 *wqe128;
+	struct lpfc_rqe *rqe;
+};
+
+/* RQ buffer list */
+struct lpfc_rqb {
+	uint16_t entry_count;	  /* Current number of RQ slots */
+	uint16_t buffer_count;	  /* Current number of buffers posted */
+	struct list_head rqb_buffer_list;  /* buffers assigned to this HBQ */
+				  /* Callback for HBQ buffer allocation */
+	struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
+				  /* Callback for HBQ buffer free */
+	void               (*rqb_free_buffer)(struct lpfc_hba *,
+					       struct rqb_dmabuf *);
+};
+
+struct lpfc_queue {
+	struct list_head list;
+	struct list_head wq_list;
+	enum lpfc_sli4_queue_type type;
+	enum lpfc_sli4_queue_subtype subtype;
+	struct lpfc_hba *phba;
+	struct list_head child_list;
+	struct list_head page_list;
+	struct list_head sgl_list;
+	uint32_t entry_count;	/* Number of entries to support on the queue */
+	uint32_t entry_size;	/* Size of each queue entry. */
+	uint32_t entry_repost;	/* Count of entries before doorbell is rung */
+#define LPFC_EQ_REPOST		8
+#define LPFC_MQ_REPOST		8
+#define LPFC_CQ_REPOST		64
+#define LPFC_RQ_REPOST		64
+#define LPFC_MAX_ISR_CQE	64
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32  /* For WQs */
+	uint32_t queue_id;	/* Queue ID assigned by the hardware */
+	uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
+	uint32_t page_count;	/* Number of pages allocated for this queue */
+	uint32_t host_index;	/* The host's index for putting or getting */
+	uint32_t hba_index;	/* The last known hba index for get or put */
+
+	struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+	struct lpfc_rqb *rqbp;	/* ptr to RQ buffers */
+
+	uint32_t q_mode;
+	uint16_t db_format;
+#define LPFC_DB_RING_FORMAT	0x01
+#define LPFC_DB_LIST_FORMAT	0x02
+	void __iomem *db_regaddr;
+	/* For q stats */
+	uint32_t q_cnt_1;
+	uint32_t q_cnt_2;
+	uint32_t q_cnt_3;
+	uint64_t q_cnt_4;
+/* defines for EQ stats */
+#define	EQ_max_eqe		q_cnt_1
+#define	EQ_no_entry		q_cnt_2
+#define	EQ_cqe_cnt		q_cnt_3
+#define	EQ_processed		q_cnt_4
+
+/* defines for CQ stats */
+#define	CQ_mbox			q_cnt_1
+#define	CQ_max_cqe		q_cnt_1
+#define	CQ_release_wqe		q_cnt_2
+#define	CQ_xri_aborted		q_cnt_3
+#define	CQ_wq			q_cnt_4
+
+/* defines for WQ stats */
+#define	WQ_overflow		q_cnt_1
+#define	WQ_posted		q_cnt_4
+
+/* defines for RQ stats */
+#define	RQ_no_posted_buf	q_cnt_1
+#define	RQ_no_buf_found		q_cnt_2
+#define	RQ_buf_posted		q_cnt_3
+#define	RQ_rcv_buf		q_cnt_4
+
+	uint64_t isr_timestamp;
+	struct lpfc_queue *assoc_qp;
+	union sli4_qe qe[1];	/* array to index entries (must be last) */
+};
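host_index and hba_index implement the usual producer/consumer bookkeeping over entry_count slots, with entry_repost bounding how many entries are processed before the doorbell is rung. A minimal sketch of the wrap-around step, using only the fields declared above (the doorbell write itself is hardware specific and omitted):

/* Advance the host's index one slot, wrapping at entry_count. */
static inline uint32_t lpfc_queue_advance_index(struct lpfc_queue *q)
{
	q->host_index = (q->host_index + 1) % q->entry_count;
	return q->host_index;
}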
+
+struct lpfc_sli4_link {
+	uint16_t speed;
+	uint8_t duplex;
+	uint8_t status;
+	uint8_t type;
+	uint8_t number;
+	uint8_t fault;
+	uint16_t logical_speed;
+	uint16_t topology;
+};
+
+struct lpfc_fcf_rec {
+	uint8_t  fabric_name[8];
+	uint8_t  switch_name[8];
+	uint8_t  mac_addr[6];
+	uint16_t fcf_indx;
+	uint32_t priority;
+	uint16_t vlan_id;
+	uint32_t addr_mode;
+	uint32_t flag;
+#define BOOT_ENABLE	0x01
+#define RECORD_VALID	0x02
+};
+
+struct lpfc_fcf_pri_rec {
+	uint16_t fcf_index;
+#define LPFC_FCF_ON_PRI_LIST 0x0001
+#define LPFC_FCF_FLOGI_FAILED 0x0002
+	uint16_t flag;
+	uint32_t priority;
+};
+
+struct lpfc_fcf_pri {
+	struct list_head list;
+	struct lpfc_fcf_pri_rec fcf_rec;
+};
+
+/*
+ * Maximum FCF table index; used for driver-internal bookkeeping.  It
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX	32
+
+struct lpfc_fcf {
+	uint16_t fcfi;
+	uint32_t fcf_flag;
+#define FCF_AVAILABLE	0x01 /* FCF available for discovery */
+#define FCF_REGISTERED	0x02 /* FCF registered with FW */
+#define FCF_SCAN_DONE	0x04 /* FCF table scan done */
+#define FCF_IN_USE	0x08 /* At least one discovery completed */
+#define FCF_INIT_DISC	0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC	0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC	0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+	uint16_t fcf_redisc_attempted;
+	uint32_t addr_mode;
+	uint32_t eligible_fcf_cnt;
+	struct lpfc_fcf_rec current_rec;
+	struct lpfc_fcf_rec failover_rec;
+	struct list_head fcf_pri_list;
+	struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
+	uint32_t current_fcf_scan_pri;
+	struct timer_list redisc_wait;
+	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
+};
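Because fcf_flag is a bitmask, the composite FCF_DISCOVERY value tests all three fast-failover discovery variants at once. An illustrative check, assuming the usual driver convention that fcf_flag is read under phba->hbalock and that phba is a valid struct lpfc_hba pointer:

bool in_discovery;

spin_lock_irq(&phba->hbalock);
/* True while INIT, DEAD, or CVL fast FCF failover discovery runs. */
in_discovery = (phba->fcf.fcf_flag & FCF_DISCOVERY) != 0;
spin_unlock_irq(&phba->hbalock);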
+
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION	1
+#define LPFC_REGION23_LAST_REC  0xff
+#define DRIVER_SPECIFIC_TYPE	0xA2
+#define LINUX_DRIVER_ID		0x20
+#define PORT_STE_TYPE		0x1
+
+struct lpfc_fip_param_hdr {
+	uint8_t type;
+#define FCOE_PARAM_TYPE		0xA0
+	uint8_t length;
+#define FCOE_PARAM_LENGTH	2
+	uint8_t parm_version;
+#define FIPP_VERSION		0x01
+	uint8_t parm_flags;
+#define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
+#define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
+#define	FIPP_MODE_ON				0x1
+#define	FIPP_MODE_OFF				0x0
+#define FIPP_VLAN_VALID				0x1
+};
+
+struct lpfc_fcoe_params {
+	uint8_t fc_map[3];
+	uint8_t reserved1;
+	uint16_t vlan_tag;
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+	uint8_t type;
+#define FCOE_CONN_TBL_TYPE		0xA1
+	uint8_t length;   /* words */
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+	uint16_t flags;
+#define	FCFCNCT_VALID		0x0001
+#define	FCFCNCT_BOOT		0x0002
+#define	FCFCNCT_PRIMARY		0x0004   /* if not set, Secondary */
+#define	FCFCNCT_FBNM_VALID	0x0008
+#define	FCFCNCT_SWNM_VALID	0x0010
+#define	FCFCNCT_VLAN_VALID	0x0020
+#define	FCFCNCT_AM_VALID	0x0040
+#define	FCFCNCT_AM_PREFERRED	0x0080   /* if not set, AM Required */
+#define	FCFCNCT_AM_SPMA		0x0100	 /* if not set, FPMA */
+
+	uint16_t vlan_tag;
+	uint8_t fabric_name[8];
+	uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+	struct list_head list;
+	struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox.  This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address dma_address;
+	void *avirt;
+	dma_addr_t aphys;
+	uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE		LPFC_EQE_SIZE_4B
+
+#define LPFC_EQE_SIZE_4B	4
+#define LPFC_EQE_SIZE_16B	16
+#define LPFC_CQE_SIZE		16
+#define LPFC_WQE_SIZE		64
+#define LPFC_WQE128_SIZE	128
+#define LPFC_MQE_SIZE		256
+#define LPFC_RQE_SIZE		8
+
+#define LPFC_EQE_DEF_COUNT	1024
+#define LPFC_CQE_DEF_COUNT      1024
+#define LPFC_WQE_DEF_COUNT      256
+#define LPFC_WQE128_DEF_COUNT   128
+#define LPFC_WQE128_MAX_COUNT   256
+#define LPFC_MQE_DEF_COUNT      16
+#define LPFC_RQE_DEF_COUNT	512
+
+#define LPFC_QUEUE_NOARM	false
+#define LPFC_QUEUE_REARM	true
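These booleans feed the arm parameter of lpfc_sli4_cq_release() and lpfc_sli4_eq_release(), whose prototypes appear at the end of this header. A hedged call-site sketch (cq is assumed to be a completion queue the caller has just drained):

/* Release the consumed CQEs and re-arm the CQ so the port raises an
 * interrupt when the next completion arrives: */
lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

/* A polling path would instead leave the queue un-armed: */
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);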
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+/*
+ * SLI4 specific data structures
+ */
+struct lpfc_max_cfg_param {
+	uint16_t max_xri;
+	uint16_t xri_base;
+	uint16_t xri_used;
+	uint16_t max_rpi;
+	uint16_t rpi_base;
+	uint16_t rpi_used;
+	uint16_t max_vpi;
+	uint16_t vpi_base;
+	uint16_t vpi_used;
+	uint16_t max_vfi;
+	uint16_t vfi_base;
+	uint16_t vfi_used;
+	uint16_t max_fcfi;
+	uint16_t fcfi_used;
+	uint16_t max_eq;
+	uint16_t max_rq;
+	uint16_t max_cq;
+	uint16_t max_wq;
+};
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+#define LPFC_SLI4_HANDLER_NAME_SZ	16
+struct lpfc_hba_eq_hdl {
+	uint32_t idx;
+	char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
+	struct lpfc_hba *phba;
+	atomic_t hba_eq_in_use;
+	struct cpumask *cpumask;
+	/* CPU affinitized to, or 0xffffffff if multiple */
+	uint32_t cpu;
+#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
+};
+
+/* BB Credit recovery value */
+struct lpfc_bbscn_params {
+	uint32_t word0;
+#define lpfc_bbscn_min_SHIFT		0
+#define lpfc_bbscn_min_MASK		0x0000000F
+#define lpfc_bbscn_min_WORD		word0
+#define lpfc_bbscn_max_SHIFT		4
+#define lpfc_bbscn_max_MASK		0x0000000F
+#define lpfc_bbscn_max_WORD		word0
+#define lpfc_bbscn_def_SHIFT		8
+#define lpfc_bbscn_def_MASK		0x0000000F
+#define lpfc_bbscn_def_WORD		word0
+};
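The SHIFT/MASK/WORD triplets here (and in lpfc_fip_param_hdr above) follow the driver's bf_set()/bf_get() token-pasting accessors from lpfc_hw4.h, which pack several sub-fields into one 32-bit word. A brief sketch for the BB-SCN fields:

struct lpfc_bbscn_params params = { .word0 = 0 };
uint32_t def;

bf_set(lpfc_bbscn_def, &params, 8);    /* stores 8 in bits 11:8 of word0 */
def = bf_get(lpfc_bbscn_def, &params); /* reads the 4-bit field back: 8 */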
+
+/* Port Capabilities for SLI4 Parameters */
+struct lpfc_pc_sli4_params {
+	uint32_t supported;
+	uint32_t if_type;
+	uint32_t sli_rev;
+	uint32_t sli_family;
+	uint32_t featurelevel_1;
+	uint32_t featurelevel_2;
+	uint32_t proto_types;
+#define LPFC_SLI4_PROTO_FCOE	0x0000001
+#define LPFC_SLI4_PROTO_FC	0x0000002
+#define LPFC_SLI4_PROTO_NIC	0x0000004
+#define LPFC_SLI4_PROTO_ISCSI	0x0000008
+#define LPFC_SLI4_PROTO_RDMA	0x0000010
+	uint32_t sge_supp_len;
+	uint32_t if_page_sz;
+	uint32_t rq_db_window;
+	uint32_t loopbk_scope;
+	uint32_t oas_supported;
+	uint32_t eq_pages_max;
+	uint32_t eqe_size;
+	uint32_t cq_pages_max;
+	uint32_t cqe_size;
+	uint32_t mq_pages_max;
+	uint32_t mqe_size;
+	uint32_t mq_elem_cnt;
+	uint32_t wq_pages_max;
+	uint32_t wqe_size;
+	uint32_t rq_pages_max;
+	uint32_t rqe_size;
+	uint32_t hdr_pages_max;
+	uint32_t hdr_size;
+	uint32_t hdr_pp_align;
+	uint32_t sgl_pages_max;
+	uint32_t sgl_pp_align;
+	uint8_t cqv;
+	uint8_t mqv;
+	uint8_t wqv;
+	uint8_t rqv;
+	uint8_t wqsize;
+#define LPFC_WQ_SZ64_SUPPORT	1
+#define LPFC_WQ_SZ128_SUPPORT	2
+	uint8_t wqpcnt;
+};
+
+struct lpfc_iov {
+	uint32_t pf_number;
+	uint32_t vf_number;
+};
+
+struct lpfc_sli4_lnk_info {
+	uint8_t lnk_dv;
+#define LPFC_LNK_DAT_INVAL	0
+#define LPFC_LNK_DAT_VAL	1
+	uint8_t lnk_tp;
+#define LPFC_LNK_GE	0x0 /* FCoE */
+#define LPFC_LNK_FC	0x1 /* FC   */
+	uint8_t lnk_no;
+	uint8_t optic_state;
+};
+
+#define LPFC_SLI4_HANDLER_CNT		(LPFC_HBA_IO_CHAN_MAX+ \
+					 LPFC_FOF_IO_CHAN_NUM)
+
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+	uint16_t	phys_id;
+	uint16_t	core_id;
+	uint16_t	irq;
+	uint16_t	channel_id;
+};
+#define LPFC_VECTOR_MAP_EMPTY	0xffff
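A hedged initialization sketch: before IRQ affinity is discovered, each cpu_map slot would be marked empty so stale physical/core IDs are never consulted (phba is assumed valid, with cpu_map already allocated for num_present_cpu entries):

int cpu;

for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
	phba->sli4_hba.cpu_map[cpu].phys_id    = LPFC_VECTOR_MAP_EMPTY;
	phba->sli4_hba.cpu_map[cpu].core_id    = LPFC_VECTOR_MAP_EMPTY;
	phba->sli4_hba.cpu_map[cpu].channel_id = LPFC_VECTOR_MAP_EMPTY;
}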
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR0, config space registers */
+	void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR1, control registers */
+	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR2, doorbell registers */
+	union {
+		struct {
+			/* IF Type 0, BAR 0 PCI cfg space reg mem map */
+			void __iomem *UERRLOregaddr;
+			void __iomem *UERRHIregaddr;
+			void __iomem *UEMASKLOregaddr;
+			void __iomem *UEMASKHIregaddr;
+		} if_type0;
+		struct {
+			/* IF Type 2, BAR 0 PCI cfg space reg mem map. */
+			void __iomem *STATUSregaddr;
+			void __iomem *CTRLregaddr;
+			void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1		0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2		0x2
+			void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART		0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON		0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP		0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ		0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ		0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS		0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ		0x6
+			void __iomem *EQDregaddr;
+		} if_type2;
+	} u;
+
+	/* IF type 0, BAR 1 and if type 2, BAR 0 CSR register memory map */
+	void __iomem *PSMPHRregaddr;
+
+	/* Well-known SLI INTF register memory map. */
+	void __iomem *SLIINTFregaddr;
+
+	/* IF type 0, BAR 1 function CSR register memory map */
+	void __iomem *ISRregaddr;	/* HST_ISR register */
+	void __iomem *IMRregaddr;	/* HST_IMR register */
+	void __iomem *ISCRregaddr;	/* HST_ISCR register */
+	/* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
+	void __iomem *RQDBregaddr;	/* RQ_DOORBELL register */
+	void __iomem *WQDBregaddr;	/* WQ_DOORBELL register */
+	void __iomem *EQCQDBregaddr;	/* EQCQ_DOORBELL register */
+	void __iomem *MQDBregaddr;	/* MQ_DOORBELL register */
+	void __iomem *BMBXregaddr;	/* BootStrap MBX register */
+
+	uint32_t ue_mask_lo;
+	uint32_t ue_mask_hi;
+	uint32_t ue_to_sr;
+	uint32_t ue_to_rp;
+	struct lpfc_register sli_intf;
+	struct lpfc_pc_sli4_params pc_sli4_params;
+	struct lpfc_bbscn_params bbscn_params;
+	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
+
+	/* Pointers to the constructed SLI4 queues */
+	struct lpfc_queue **hba_eq;  /* Event queues for HBA */
+	struct lpfc_queue **fcp_cq;  /* Fast-path FCP compl queue */
+	struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
+	struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
+	struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
+	struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
+	struct lpfc_queue **fcp_wq;  /* Fast-path FCP work queue */
+	struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
+	uint16_t *fcp_cq_map;
+	uint16_t *nvme_cq_map;
+	struct list_head lpfc_wq_list;
+
+	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+	struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */
+	struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+	struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+	struct lpfc_queue *nvmels_wq; /* NVME LS work queue */
+	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+
+	struct lpfc_name wwnn;
+	struct lpfc_name wwpn;
+
+	uint32_t fw_func_mode;	/* FW function protocol mode */
+	uint32_t ulp0_mode;	/* ULP0 protocol mode */
+	uint32_t ulp1_mode;	/* ULP1 protocol mode */
+
+	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */
+
+	/* Optimized Access Storage specific queues/structures */
+
+	struct lpfc_queue *oas_cq; /* OAS completion queue */
+	struct lpfc_queue *oas_wq; /* OAS Work queue */
+	struct lpfc_sli_ring *oas_ring;
+	uint64_t oas_next_lun;
+	uint8_t oas_next_tgt_wwpn[8];
+	uint8_t oas_next_vpt_wwpn[8];
+
+	/* Setup information for various queue parameters */
+	int eq_esize;
+	int eq_ecount;
+	int cq_esize;
+	int cq_ecount;
+	int wq_esize;
+	int wq_ecount;
+	int mq_esize;
+	int mq_ecount;
+	int rq_esize;
+	int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC         10000
+#define LPFC_FP_EQ_MAX_INTR_SEC         10000
+
+	uint32_t intr_enable;
+	struct lpfc_bmbx bmbx;
+	struct lpfc_max_cfg_param max_cfg_param;
+	uint16_t extents_in_use; /* must allocate resource extents. */
+	uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
+	uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+	uint16_t next_rpi;
+	uint16_t nvme_xri_max;
+	uint16_t nvme_xri_cnt;
+	uint16_t nvme_xri_start;
+	uint16_t scsi_xri_max;
+	uint16_t scsi_xri_cnt;
+	uint16_t scsi_xri_start;
+	uint16_t els_xri_cnt;
+	uint16_t nvmet_xri_cnt;
+	uint16_t nvmet_io_wait_cnt;
+	uint16_t nvmet_io_wait_total;
+	struct list_head lpfc_els_sgl_list;
+	struct list_head lpfc_abts_els_sgl_list;
+	struct list_head lpfc_nvmet_sgl_list;
+	struct list_head lpfc_abts_nvmet_ctx_list;
+	struct list_head lpfc_abts_scsi_buf_list;
+	struct list_head lpfc_abts_nvme_buf_list;
+	struct list_head lpfc_nvmet_io_wait_list;
+	struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
+	struct lpfc_sglq **lpfc_sglq_active_list;
+	struct list_head lpfc_rpi_hdr_list;
+	unsigned long *rpi_bmask;
+	uint16_t *rpi_ids;
+	uint16_t rpi_count;
+	struct list_head lpfc_rpi_blk_list;
+	unsigned long *xri_bmask;
+	uint16_t *xri_ids;
+	struct list_head lpfc_xri_blk_list;
+	unsigned long *vfi_bmask;
+	uint16_t *vfi_ids;
+	uint16_t vfi_count;
+	struct list_head lpfc_vfi_blk_list;
+	struct lpfc_sli4_flags sli4_flags;
+	struct list_head sp_queue_event;
+	struct list_head sp_cqe_event_pool;
+	struct list_head sp_asynce_work_queue;
+	struct list_head sp_fcp_xri_aborted_work_queue;
+	struct list_head sp_els_xri_aborted_work_queue;
+	struct list_head sp_nvme_xri_aborted_work_queue;
+	struct list_head sp_unsol_work_queue;
+	struct lpfc_sli4_link link_state;
+	struct lpfc_sli4_lnk_info lnk_info;
+	uint32_t pport_name_sta;
+#define LPFC_SLI4_PPNAME_NON	0
+#define LPFC_SLI4_PPNAME_GET	1
+	struct lpfc_iov iov;
+	spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
+	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+	spinlock_t sgl_list_lock; /* list of aborted els IOs */
+	spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
+	uint32_t physical_port;
+
+	/* CPU to vector mapping information */
+	struct lpfc_vector_map_info *cpu_map;
+	uint16_t num_online_cpu;
+	uint16_t num_present_cpu;
+	uint16_t curr_disp_cpu;
+};
+
+enum lpfc_sge_type {
+	GEN_BUFF_TYPE,
+	SCSI_BUFF_TYPE,
+	NVMET_BUFF_TYPE
+};
+
+enum lpfc_sgl_state {
+	SGL_FREED,
+	SGL_ALLOCATED,
+	SGL_XRI_ABORTED
+};
+
+struct lpfc_sglq {
+	/* lpfc_sglqs are used in doubly linked lists */
+	struct list_head list;
+	struct list_head clist;
+	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	enum lpfc_sgl_state state;
+	struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
+	uint16_t iotag;         /* pre-assigned IO tag */
+	uint16_t sli4_lxritag;  /* logical pre-assigned xri. */
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct sli4_sge *sgl;	/* pre-assigned SGL */
+	void *virt;		/* virtual address. */
+	dma_addr_t phys;	/* physical address */
+};
+
+struct lpfc_rpi_hdr {
+	struct list_head list;
+	uint32_t len;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t page_count;
+	uint32_t start_rpi;
+	uint16_t next_rpi;
+};
+
+struct lpfc_rsrc_blks {
+	struct list_head list;
+	uint16_t rsrc_start;
+	uint16_t rsrc_size;
+	uint16_t rsrc_used;
+};
+
+struct lpfc_rdp_context {
+	struct lpfc_nodelist *ndlp;
+	uint16_t ox_id;
+	uint16_t rx_id;
+	READ_LNK_VAR link_stat;
+	uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];
+	uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];
+	void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int);
+};
+
+struct lpfc_lcb_context {
+	uint8_t  sub_command;
+	uint8_t  type;
+	uint8_t  frequency;
+	uint16_t ox_id;
+	uint16_t rx_id;
+	struct lpfc_nodelist *ndlp;
+};
+
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+		     uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+			   struct lpfc_mbx_sge *);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+			       uint16_t);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+			uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
+int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+			     uint32_t numq, uint32_t imax);
+int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t, uint32_t);
+int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
+			struct lpfc_queue **eqp, uint32_t type,
+			uint32_t subtype);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+		       struct lpfc_queue *, uint32_t);
+int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t);
+int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
+			struct lpfc_queue **drqp, struct lpfc_queue **cqp,
+			uint32_t subtype);
+int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+			 struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+void lpfc_sli4_free_xri(struct lpfc_hba *, int);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
+			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+				struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+				 struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
+int lpfc_sli4_init_vpi(struct lpfc_vport *);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_version.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_version.h
new file mode 100644
index 0000000..6aa192b
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_version.h
@@ -0,0 +1,37 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_DRIVER_VERSION "11.4.0.3"
+#define LPFC_DRIVER_NAME		"lpfc"
+
+/* Used for SLI 2/3 */
+#define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
+#define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
+
+/* Used for SLI4 */
+#define LPFC_DRIVER_HANDLER_NAME	"lpfc:"
+
+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
+		LPFC_DRIVER_VERSION
+#define LPFC_COPYRIGHT "Copyright (C) 2017 Broadcom. All Rights Reserved. " \
+		"The term \"Broadcom\" refers to Broadcom Limited " \
+		"and/or its subsidiaries."
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_vport.c b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 0000000..9c738e2
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,930 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sched/signal.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+
+inline void lpfc_vport_set_state(struct lpfc_vport *vport,
+				 enum fc_vport_state new_state)
+{
+	struct fc_vport *fc_vport = vport->fc_vport;
+
+	if (fc_vport) {
+		/*
+		 * When the transport defines fc_vport_set_state we will replace
+		 * this code with the following line
+		 */
+		/* fc_vport_set_state(fc_vport, new_state); */
+		if (new_state != FC_VPORT_INITIALIZING)
+			fc_vport->vport_last_state = fc_vport->vport_state;
+		fc_vport->vport_state = new_state;
+	}
+
+	/* for all the error states we will set the internal state to FAILED */
+	switch (new_state) {
+	case FC_VPORT_NO_FABRIC_SUPP:
+	case FC_VPORT_NO_FABRIC_RSCS:
+	case FC_VPORT_FABRIC_LOGOUT:
+	case FC_VPORT_FABRIC_REJ_WWN:
+	case FC_VPORT_FAILED:
+		vport->port_state = LPFC_VPORT_FAILED;
+		break;
+	case FC_VPORT_LINKDOWN:
+		vport->port_state = LPFC_VPORT_UNKNOWN;
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+}
+
+int
+lpfc_alloc_vpi(struct lpfc_hba *phba)
+{
+	unsigned long vpi;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Start at bit 1 because vpi zero is reserved for the physical port */
+	vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
+	if (vpi > phba->max_vpi)
+		vpi = 0;
+	else
+		set_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used++;
+	spin_unlock_irq(&phba->hbalock);
+	return vpi;
+}
+
+static void
+lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
+{
+	if (vpi == 0)
+		return;
+	spin_lock_irq(&phba->hbalock);
+	clear_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used--;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+static int
+lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *mp;
+	int  rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+
+	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Grab buffer pointer and clear context1 so we can use
+	 * lpfc_sli_issue_mbox_wait
+	 */
+	mp = (struct lpfc_dmabuf *) pmb->context1;
+	pmb->context1 = NULL;
+
+	pmb->vport = vport;
+	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
+	if (rc != MBX_SUCCESS) {
+		if (signal_pending(current)) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+					 "1830 Signal aborted mbxCmd x%x\n",
+					 mb->mbxCommand);
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+			if (rc != MBX_TIMEOUT)
+				mempool_free(pmb, phba->mbox_mem_pool);
+			return -EINTR;
+		} else {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+					 "1818 VPort failed init, mbxCmd x%x "
+					 "READ_SPARM mbxStatus x%x, rc = x%x\n",
+					 mb->mbxCommand, mb->mbxStatus, rc);
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+			if (rc != MBX_TIMEOUT)
+				mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	}
+
+	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+	       sizeof (struct lpfc_name));
+	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+	       sizeof (struct lpfc_name));
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return 0;
+}
+
+static int
+lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
+		      const char *name_type)
+{
+	/* ensure that IEEE format 1 addresses
+	 * contain zeros in bits 59-48
+	 */
+	if (!((wwn->u.wwn[0] >> 4) == 1 &&
+	      ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
+		return 1;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+			"1822 Invalid %s: %02x:%02x:%02x:%02x:"
+			"%02x:%02x:%02x:%02x\n",
+			name_type,
+			wwn->u.wwn[0], wwn->u.wwn[1],
+			wwn->u.wwn[2], wwn->u.wwn[3],
+			wwn->u.wwn[4], wwn->u.wwn[5],
+			wwn->u.wwn[6], wwn->u.wwn[7]);
+	return 0;
+}
+
+static int
+lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
+{
+	struct lpfc_vport *vport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		if (vport == new_vport)
+			continue;
+		/* If they match, return not unique */
+		if (memcmp(&vport->fc_sparam.portName,
+			   &new_vport->fc_sparam.portName,
+			   sizeof(struct lpfc_name)) == 0) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return 1;
+}
+
+/**
+ * lpfc_discovery_wait - Wait for driver discovery to quiesce
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * This routine is called from lpfc_vport_delete to enforce synchronous
+ * execution of the vport delete relative to discovery activities.  The
+ * lpfc_vport_delete routine should not return until it can reasonably
+ * guarantee that discovery has quiesced.  Post FDISC LOGO, the driver
+ * must wait until its SAN teardown is complete and all resources are
+ * recovered before allowing cleanup.
+ *
+ * This routine does not require any locks held.
+ **/
+static void lpfc_discovery_wait(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t wait_flags = 0;
+	unsigned long wait_time_max;
+	unsigned long start_time;
+
+	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
+		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
+
+	/*
+	 * The time constraint on this loop is a balance between the
+	 * fabric RA_TOV value and dev_loss tmo.  The driver's
+	 * devloss_tmo is 10 seconds, giving this loop a 3x multiplier minimally.
+	 */
+	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+	wait_time_max += jiffies;
+	start_time = jiffies;
+	while (time_before(jiffies, wait_time_max)) {
+		if ((vport->num_disc_nodes > 0)    ||
+		    (vport->fc_flag & wait_flags)  ||
+		    ((vport->port_state > LPFC_VPORT_FAILED) &&
+		     (vport->port_state < LPFC_VPORT_READY))) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+					"1833 Vport discovery quiesce Wait:"
+					" state x%x fc_flags x%x"
+					" num_nodes x%x, waiting 1000 msecs"
+					" total wait msecs x%x\n",
+					vport->port_state, vport->fc_flag,
+					vport->num_disc_nodes,
+					jiffies_to_msecs(jiffies - start_time));
+			msleep(1000);
+		} else {
+			/* Base case.  Wait variants satisfied.  Break out */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+					 "1834 Vport discovery quiesced:"
+					 " state x%x fc_flags x%x"
+					 " wait msecs x%x\n",
+					 vport->port_state, vport->fc_flag,
+					 jiffies_to_msecs(jiffies
+						- start_time));
+			break;
+		}
+	}
+
+	if (time_after(jiffies, wait_time_max))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				"1835 Vport discovery quiesce failed:"
+				" state x%x fc_flags x%x wait msecs x%x\n",
+				vport->port_state, vport->fc_flag,
+				jiffies_to_msecs(jiffies - start_time));
+}
+
+int
+lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = fc_vport->shost;
+	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = pport->phba;
+	struct lpfc_vport *vport = NULL;
+	int instance;
+	int vpi;
+	int rc = VPORT_ERROR;
+	int status;
+
+	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1808 Create VPORT failed: "
+				"NPIV is not enabled: SLImode:%d\n",
+				phba->sli_rev);
+		rc = VPORT_INVAL;
+		goto error_out;
+	}
+
+	vpi = lpfc_alloc_vpi(phba);
+	if (vpi == 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1809 Create VPORT failed: "
+				"Max VPORTs (%d) exceeded\n",
+				phba->max_vpi);
+		rc = VPORT_NORESOURCES;
+		goto error_out;
+	}
+
+	/* Assign an unused board number */
+	if ((instance = lpfc_get_instance()) < 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1810 Create VPORT failed: Cannot get "
+				"instance number\n");
+		lpfc_free_vpi(phba, vpi);
+		rc = VPORT_NORESOURCES;
+		goto error_out;
+	}
+
+	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
+	if (!vport) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1811 Create VPORT failed: vpi x%x\n", vpi);
+		lpfc_free_vpi(phba, vpi);
+		rc = VPORT_NORESOURCES;
+		goto error_out;
+	}
+
+	vport->vpi = vpi;
+	lpfc_debugfs_initialize(vport);
+
+	if ((status = lpfc_vport_sparm(phba, vport))) {
+		if (status == -EINTR) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1831 Create VPORT Interrupted.\n");
+			rc = VPORT_ERROR;
+		} else {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1813 Create VPORT failed. "
+					 "Cannot get sparam\n");
+			rc = VPORT_NORESOURCES;
+		}
+		lpfc_free_vpi(phba, vpi);
+		destroy_port(vport);
+		goto error_out;
+	}
+
+	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+
+	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
+	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
+
+	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
+	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1821 Create VPORT failed. "
+				 "Invalid WWN format\n");
+		lpfc_free_vpi(phba, vpi);
+		destroy_port(vport);
+		rc = VPORT_INVAL;
+		goto error_out;
+	}
+
+	if (!lpfc_unique_wwpn(phba, vport)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1823 Create VPORT failed. "
+				 "Duplicate WWN on HBA\n");
+		lpfc_free_vpi(phba, vpi);
+		destroy_port(vport);
+		rc = VPORT_INVAL;
+		goto error_out;
+	}
+
+	/* Create binary sysfs attribute for vport */
+	lpfc_alloc_sysfs_attr(vport);
+
+	/* Set the DFT_LUN_Q_DEPTH accordingly */
+	vport->cfg_lun_queue_depth  = phba->pport->cfg_lun_queue_depth;
+
+	*(struct lpfc_vport **)fc_vport->dd_data = vport;
+	vport->fc_vport = fc_vport;
+
+	/* At this point we are fully registered with SCSI Layer.  */
+	vport->load_flag |= FC_ALLOW_FDMI;
+	if (phba->cfg_enable_SmartSAN ||
+	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
+		/* Setup appropriate attribute masks */
+		vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
+		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
+	}
+
+	if ((phba->nvmet_support == 0) &&
+	    ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+	     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
+		/* Create NVME binding with nvme_fc_transport. This
+		 * ensures the vport is initialized.
+		 */
+		rc = lpfc_nvme_create_localport(vport);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6003 %s status x%x\n",
+					"NVME registration failed, ",
+					rc);
+			goto error_out;
+		}
+	}
+
+	/*
+	 * In SLI4, the vpi must be activated before it can be used
+	 * by the port.
+	 */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    (pport->fc_flag & FC_VFI_REGISTERED)) {
+		rc = lpfc_sli4_init_vpi(vport);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+					"1838 Failed to INIT_VPI on vpi %d "
+					"status %d\n", vpi, rc);
+			rc = VPORT_NORESOURCES;
+			lpfc_free_vpi(phba, vpi);
+			goto error_out;
+		}
+	} else if (phba->sli_rev == LPFC_SLI_REV4) {
+		/*
+		 * Driver cannot INIT_VPI now. Set the flags to
+		 * init_vpi when reg_vfi completes.
+		 */
+		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		rc = VPORT_OK;
+		goto out;
+	}
+
+	if ((phba->link_state < LPFC_LINK_UP) ||
+	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
+	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		rc = VPORT_OK;
+		goto out;
+	}
+
+	if (disable) {
+		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+		rc = VPORT_OK;
+		goto out;
+	}
+
+	/* Use the Physical node's Fabric NDLP to determine if the link is
+	 * up and ready to FDISC.
+	 */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+			lpfc_set_disctmo(vport);
+			lpfc_initial_fdisc(vport);
+		} else {
+			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0262 No NPIV Fabric support\n");
+		}
+	} else {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	}
+	rc = VPORT_OK;
+
+out:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			"1825 Vport Created.\n");
+	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
+error_out:
+	return rc;
+}
+
+static int
+disable_vport(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+	long timeout;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+	    && phba->link_state >= LPFC_LINK_UP) {
+		vport->unreg_vpi_cmpl = VPORT_INVAL;
+		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+				timeout = schedule_timeout(timeout);
+	}
+
+	lpfc_sli_host_down(vport);
+
+	/* Mark all nodes for discovery so we can remove them by
+	 * calling lpfc_cleanup_rpis(vport, 1)
+	 */
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+	}
+	lpfc_cleanup_rpis(vport, 1);
+
+	lpfc_stop_vport_timers(vport);
+	lpfc_unreg_all_rpis(vport);
+	lpfc_unreg_default_rpis(vport);
+	/*
+	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+	 * scsi_host_put() to release the vport.
+	 */
+	lpfc_mbx_unreg_vpi(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			 "1826 Vport Disabled.\n");
+	return VPORT_OK;
+}
+
+static int
+enable_vport(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if ((phba->link_state < LPFC_LINK_UP) ||
+	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		return VPORT_OK;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_LOADING;
+	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+		spin_unlock_irq(shost->host_lock);
+		lpfc_issue_init_vpi(vport);
+		goto out;
+	}
+
+	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	/* Use the Physical node's Fabric NDLP to determine if the link is
+	 * up and ready to FDISC.
+	 */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+			lpfc_set_disctmo(vport);
+			lpfc_initial_fdisc(vport);
+		} else {
+			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0264 No NPIV Fabric support\n");
+		}
+	} else {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	}
+
+out:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			 "1827 Vport Enabled.\n");
+	return VPORT_OK;
+}
+
+int
+lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+	if (disable)
+		return disable_vport(fc_vport);
+	else
+		return enable_vport(fc_vport);
+}
+
+
+int
+lpfc_vport_delete(struct fc_vport *fc_vport)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	long timeout;
+	bool ns_ndlp_referenced = false;
+
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1812 vport_delete failed: Cannot delete "
+				 "physical host\n");
+		return VPORT_ERROR;
+	}
+
+	/* If the vport is a static vport fail the deletion. */
+	if ((vport->vport_flag & STATIC_VPORT) &&
+		!(phba->pport->load_flag & FC_UNLOADING)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1837 vport_delete failed: Cannot delete "
+				 "static vport.\n");
+		return VPORT_ERROR;
+	}
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+	/*
+	 * If we are not unloading the driver then prevent the vport_delete
+	 * from happening until after this vport's discovery is finished.
+	 */
+	if (!(phba->pport->load_flag & FC_UNLOADING)) {
+		int check_count = 0;
+		while (check_count < ((phba->fc_ratov * 3) + 3) &&
+		       vport->port_state > LPFC_VPORT_FAILED &&
+		       vport->port_state < LPFC_VPORT_READY) {
+			check_count++;
+			msleep(1000);
+		}
+		if (vport->port_state > LPFC_VPORT_FAILED &&
+		    vport->port_state < LPFC_VPORT_READY)
+			return -EAGAIN;
+	}
+
+	/*
+	 * Take early refcount for outstanding I/O requests we schedule during
+	 * delete processing for unreg_vpi.  Always keep this before
+	 * scsi_remove_host() as we can no longer obtain a reference through
+	 * scsi_host_get() after scsi_remove_host() as shost is set to SHOST_DEL.
+	 */
+	if (!scsi_host_get(shost))
+		return VPORT_INVAL;
+
+	lpfc_free_sysfs_attr(vport);
+
+	lpfc_debugfs_terminate(vport);
+
+	/*
+	 * The call to fc_remove_host might release the NameServer ndlp. Since
+	 * we might need to use the ndlp to send the DA_ID CT command,
+	 * increment the reference for the NameServer ndlp to prevent it from
+	 * being released.
+	 */
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_nlp_get(ndlp);
+		ns_ndlp_referenced = true;
+	}
+
+	/* Remove FC host and then SCSI host with the vport */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+
+	/* In case of driver unload, we shall not perform fabric logo as the
+	 * worker thread already stopped at this stage and, in this case, we
+	 * can safely skip the fabric logo.
+	 */
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+		    phba->link_state >= LPFC_LINK_UP) {
+			/* First look for the Fabric ndlp */
+			ndlp = lpfc_findnode_did(vport, Fabric_DID);
+			if (!ndlp)
+				goto skip_logo;
+			else if (!NLP_CHK_NODE_ACT(ndlp)) {
+				ndlp = lpfc_enable_node(vport, ndlp,
+							NLP_STE_UNUSED_NODE);
+				if (!ndlp)
+					goto skip_logo;
+			}
+			/* Remove ndlp from vport node list */
+			lpfc_dequeue_node(vport, ndlp);
+
+			/* Indicate free memory when release */
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+			/* Kick off release ndlp when it can be safely done */
+			lpfc_nlp_put(ndlp);
+		}
+		goto skip_logo;
+	}
+
+	/* Otherwise, we will perform fabric logo as needed */
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+	    phba->link_state >= LPFC_LINK_UP &&
+	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+		if (vport->cfg_enable_da_id) {
+			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
+				while (vport->ct_flags && timeout)
+					timeout = schedule_timeout(timeout);
+			else
+				lpfc_printf_log(vport->phba, KERN_WARNING,
+						LOG_VPORT,
+						"1829 CT command failed to "
+						"delete objects on fabric\n");
+		}
+		/* First look for the Fabric ndlp */
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp) {
+			/* Cannot find existing Fabric ndlp, allocate one */
+			ndlp = lpfc_nlp_init(vport, Fabric_DID);
+			if (!ndlp)
+				goto skip_logo;
+			/* Indicate free memory when release */
+			NLP_SET_FREE_REQ(ndlp);
+		} else {
+			if (!NLP_CHK_NODE_ACT(ndlp)) {
+				ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+				if (!ndlp)
+					goto skip_logo;
+			}
+
+			/* Remove ndlp from vport list */
+			lpfc_dequeue_node(vport, ndlp);
+			spin_lock_irq(&phba->ndlp_lock);
+			if (!NLP_CHK_FREE_REQ(ndlp))
+				/* Indicate free memory when release */
+				NLP_SET_FREE_REQ(ndlp);
+			else {
+				/* Skip this if ndlp is already in free mode */
+				spin_unlock_irq(&phba->ndlp_lock);
+				goto skip_logo;
+			}
+			spin_unlock_irq(&phba->ndlp_lock);
+		}
+
+		/*
+		 * If the vpi is not registered, then a valid FDISC doesn't
+		 * exist and there is no need for an ELS LOGO.  Just clean up
+		 * the ndlp.
+		 */
+		if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+			lpfc_nlp_put(ndlp);
+			goto skip_logo;
+		}
+
+		vport->unreg_vpi_cmpl = VPORT_INVAL;
+		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+				timeout = schedule_timeout(timeout);
+	}
+
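+	/* Unless the driver is unloading, wait for this vport's discovery
+	 * to quiesce before continuing with the teardown.
+	 */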
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_discovery_wait(vport);
+
+skip_logo:
+
+	/*
+	 * If the NameServer ndlp reference count was incremented to allow the
+	 * DA_ID CT command to be sent, drop that reference now.
+	 */
+	if (ns_ndlp_referenced) {
+		ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		lpfc_nlp_put(ndlp);
+	}
+
+	lpfc_cleanup(vport);
+	lpfc_sli_host_down(vport);
+
+	lpfc_stop_vport_timers(vport);
+
+	if (!(phba->pport->load_flag & FC_UNLOADING)) {
+		lpfc_unreg_all_rpis(vport);
+		lpfc_unreg_default_rpis(vport);
+		/*
+		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
+		 * does the scsi_host_put() to release the vport.
+		 */
+		if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
+				lpfc_mbx_unreg_vpi(vport))
+			scsi_host_put(shost);
+	} else {
+		scsi_host_put(shost);
+	}
+
+	lpfc_free_vpi(phba, vport->vpi);
+	vport->work_port_events = 0;
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			 "1828 Vport Deleted.\n");
+	scsi_host_put(shost);
+	return VPORT_OK;
+}
+
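+/**
+ * lpfc_create_vport_work_array - Snapshot the HBA's active vports
+ * @phba: Pointer to HBA context object.
+ *
+ * Allocates a NULL-terminated array of vport pointers and fills it with
+ * every vport on @phba that is not unloading, taking a scsi_host_get()
+ * reference on each so the caller can use the array without holding
+ * hbalock.  Returns NULL on allocation failure.  The caller must release
+ * the array with lpfc_destroy_vport_work_array().
+ **/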
+struct lpfc_vport **
+lpfc_create_vport_work_array(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *port_iterator;
+	struct lpfc_vport **vports;
+	int index = 0;
+
+	vports = kcalloc(phba->max_vports + 1, sizeof(struct lpfc_vport *),
+			 GFP_KERNEL);
+	if (vports == NULL)
+		return NULL;
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+		if (port_iterator->load_flag & FC_UNLOADING)
+			continue;
+		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
+			lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
+					 "1801 Create vport work array FAILED: "
+					 "cannot do scsi_host_get\n");
+			continue;
+		}
+		vports[index++] = port_iterator;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return vports;
+}
+
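+/**
+ * lpfc_destroy_vport_work_array - Release a vport work array
+ * @phba: Pointer to HBA context object.
+ * @vports: Array returned by lpfc_create_vport_work_array().
+ *
+ * Drops the scsi_host reference held on each vport in @vports and frees
+ * the array.  Safe to call with @vports == NULL.
+ **/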
+void
+lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
+{
+	int i;
+
+	if (vports == NULL)
+		return;
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+		scsi_host_put(lpfc_shost_from_vport(vports[i]));
+	kfree(vports);
+}
+
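+/*
+ * Typical usage (sketch): pair every create with a destroy so the host
+ * references taken on each vport are dropped:
+ *
+ *	struct lpfc_vport **vports = lpfc_create_vport_work_array(phba);
+ *	int i;
+ *
+ *	if (vports != NULL)
+ *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ *			... operate on vports[i] ...
+ *	lpfc_destroy_vport_work_array(phba, vports);
+ */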
+
+/**
+ * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
+ * @vport: Pointer to vport object.
+ *
+ * This function resets the statistical data for the vport.  It is
+ * called with the host_lock held.
+ **/
+void
+lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->lat_data)
+			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
+				sizeof(struct lpfc_scsicmd_bkt));
+	}
+}
+
+/**
+ * lpfc_alloc_bucket - Allocate the data buffers required for statistical data
+ * @vport: Pointer to vport object.
+ *
+ * This function allocates the data buffers required by all the FC
+ * nodes of the vport to collect statistical data.
+ **/
+void
+lpfc_alloc_bucket(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+
+		kfree(ndlp->lat_data);
+		ndlp->lat_data = NULL;
+
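+		/*
+		 * Latency buckets are kept only for mapped (SCSI target)
+		 * nodes.  GFP_ATOMIC is used here on the assumption that
+		 * callers may hold locks.
+		 */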
+		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+					 sizeof(struct lpfc_scsicmd_bkt),
+					 GFP_ATOMIC);
+
+			if (!ndlp->lat_data)
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+					"0287 lpfc_alloc_bucket failed to "
+					"allocate statistical data buffer DID "
+					"0x%x\n", ndlp->nlp_DID);
+		}
+	}
+}
+
+/**
+ * lpfc_free_bucket - Free the data buffers used for statistical data
+ * @vport: Pointer to vport object.
+ *
+ * This function frees the statistical data buffers of all the FC
+ * nodes of the vport.
+ **/
+void
+lpfc_free_bucket(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+
+		kfree(ndlp->lat_data);
+		ndlp->lat_data = NULL;
+	}
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_vport.h b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 0000000..6229597
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,122 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
+ * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.broadcom.com                                                *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#ifndef _H_LPFC_VPORT
+#define _H_LPFC_VPORT
+
+/* API version values (each will be an individual bit) */
+#define VPORT_API_VERSION_1	0x01
+
+/* Values returned via lpfc_vport_getinfo() */
+struct vport_info {
+
+	uint32_t api_versions;
+	uint8_t linktype;
+#define  VPORT_TYPE_PHYSICAL	0
+#define  VPORT_TYPE_VIRTUAL	1
+
+	uint8_t state;
+#define  VPORT_STATE_OFFLINE	0
+#define  VPORT_STATE_ACTIVE	1
+#define  VPORT_STATE_FAILED	2
+
+	uint8_t fail_reason;
+	uint8_t prev_fail_reason;
+#define  VPORT_FAIL_UNKNOWN	0
+#define  VPORT_FAIL_LINKDOWN	1
+#define  VPORT_FAIL_FAB_UNSUPPORTED	2
+#define  VPORT_FAIL_FAB_NORESOURCES	3
+#define  VPORT_FAIL_FAB_LOGOUT	4
+#define  VPORT_FAIL_ADAP_NORESOURCES	5
+
+	uint8_t node_name[8];	/* WWNN */
+	uint8_t port_name[8];	/* WWPN */
+
+	struct Scsi_Host *shost;
+
+/* The following values are valid only on physical links */
+	uint32_t vports_max;
+	uint32_t vports_inuse;
+	uint32_t rpi_max;
+	uint32_t rpi_inuse;
+#define  VPORT_CNT_INVALID	0xFFFFFFFF
+};
+
+/* Data used in link creation */
+struct vport_data {
+	uint32_t api_version;
+
+	uint32_t options;
+#define  VPORT_OPT_AUTORETRY	0x01
+
+	uint8_t node_name[8];	/* WWNN */
+	uint8_t port_name[8];	/* WWPN */
+
+/*
+ *  Upon successful creation, vport_shost will point to the new Scsi_Host
+ *  structure for the new virtual link.
+ */
+	struct Scsi_Host *vport_shost;
+};
+
+/* API function return codes */
+#define VPORT_OK	0
+#define VPORT_ERROR	-1
+#define VPORT_INVAL	-2
+#define VPORT_NOMEM	-3
+#define VPORT_NORESOURCES	-4
+
+int lpfc_vport_create(struct fc_vport *, bool);
+int lpfc_vport_delete(struct fc_vport *);
+int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
+int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
+struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
+void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
+
+/*
+ *  queuecommand VPORT-specific return codes, specified in the host byte
+ *  of the SCSI result.  Returned when the virtual link has failed or is
+ *  not active.
+ */
+#define  DID_VPORT_ERROR	0x0f
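+/*
+ * Example (sketch, illustrative only): a queuecommand path could fail a
+ * command on a dead virtual link by setting this value in the host byte:
+ *
+ *	cmnd->result = DID_VPORT_ERROR << 16;
+ *	cmnd->scsi_done(cmnd);
+ *	return 0;
+ */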
+
+#define VPORT_INFO	0x1
+#define VPORT_CREATE	0x2
+#define VPORT_DELETE	0x4
+
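+/*
+ * Tag aggregating a vport management command with its payloads; cmd
+ * presumably selects which of cdata/cinfo is used (VPORT_INFO,
+ * VPORT_CREATE or VPORT_DELETE above).
+ */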
+struct vport_cmd_tag {
+	uint32_t cmd;
+	struct vport_data cdata;
+	struct vport_info cinfo;
+	void *vport;
+	int vport_num;
+};
+
+void lpfc_vport_set_state(struct lpfc_vport *vport,
+			  enum fc_vport_state new_state);
+
+void lpfc_vport_reset_stat_data(struct lpfc_vport *);
+void lpfc_alloc_bucket(struct lpfc_vport *);
+void lpfc_free_bucket(struct lpfc_vport *);
+
+#endif /* _H_LPFC_VPORT */